
Moving IPA and GSI driver code to dataipa techpack

This is a snapshot of IPA and GSI drivers
from the msm-5.4 kernel as of
'commit <137a997bae4f8e1787> ("msm: ipa: Fix using
generic name for offload connect/disconnect")'.

Change-Id: I8452cf76a1c568c1f89de8b329bdc6e7254694b4
Signed-off-by: Ghanim Fodi <[email protected]>
Ghanim Fodi, 5 years ago
parent commit 20819ba500
100 changed files with 139725 additions and 7 deletions
  1. Makefile (+4 -2)
  2. dataipa.c (+0 -5)
  3. gsi/Makefile (+9 -0)
  4. gsi/gsi.c (+4596 -0)
  5. gsi/gsi.h (+351 -0)
  6. gsi/gsi_dbg.c (+744 -0)
  7. gsi/gsi_emulation.c (+227 -0)
  8. gsi/gsi_emulation.h (+189 -0)
  9. gsi/gsi_emulation_stubs.h (+12 -0)
  10. gsi/gsi_reg.h (+30 -0)
  11. gsi/gsi_reg_v1.h (+1098 -0)
  12. gsi/gsi_reg_v2.h (+1157 -0)
  13. ipa/Makefile (+61 -0)
  14. ipa/ipa_api.c (+3854 -0)
  15. ipa/ipa_api.h (+515 -0)
  16. ipa/ipa_clients/Makefile (+10 -0)
  17. ipa/ipa_clients/ecm_ipa.c (+1487 -0)
  18. ipa/ipa_clients/ipa_gsb.c (+1226 -0)
  19. ipa/ipa_clients/ipa_mhi_client.c (+2507 -0)
  20. ipa/ipa_clients/ipa_uc_offload.c (+635 -0)
  21. ipa/ipa_clients/ipa_usb.c (+2638 -0)
  22. ipa/ipa_clients/ipa_wdi3.c (+774 -0)
  23. ipa/ipa_clients/ipa_wigig.c (+2128 -0)
  24. ipa/ipa_clients/odu_bridge.c (+1212 -0)
  25. ipa/ipa_clients/rndis_ipa.c (+2440 -0)
  26. ipa/ipa_clients/rndis_ipa_trace.h (+74 -0)
  27. ipa/ipa_common_i.h (+487 -0)
  28. ipa/ipa_rm.c (+1184 -0)
  29. ipa/ipa_rm_dependency_graph.c (+240 -0)
  30. ipa/ipa_rm_dependency_graph.h (+42 -0)
  31. ipa/ipa_rm_i.h (+150 -0)
  32. ipa/ipa_rm_inactivity_timer.c (+279 -0)
  33. ipa/ipa_rm_peers_list.c (+270 -0)
  34. ipa/ipa_rm_peers_list.h (+55 -0)
  35. ipa/ipa_rm_resource.c (+1204 -0)
  36. ipa/ipa_rm_resource.h (+159 -0)
  37. ipa/ipa_uc_offload_common_i.h (+22 -0)
  38. ipa/ipa_v3/Makefile (+4 -0)
  39. ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h (+2392 -0)
  40. ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h (+530 -0)
  41. ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h (+42 -0)
  42. ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h (+12 -0)
  43. ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h (+7 -0)
  44. ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h (+593 -0)
  45. ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h (+10895 -0)
  46. ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h (+2963 -0)
  47. ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h (+183 -0)
  48. ipa/ipa_v3/dump/ipa_reg_dump.c (+1632 -0)
  49. ipa/ipa_v3/dump/ipa_reg_dump.h (+1397 -0)
  50. ipa/ipa_v3/ipa.c (+8884 -0)
  51. ipa/ipa_v3/ipa_client.c (+1849 -0)
  52. ipa/ipa_v3/ipa_debugfs.c (+3104 -0)
  53. ipa/ipa_v3/ipa_defs.h (+94 -0)
  54. ipa/ipa_v3/ipa_dma.c (+1243 -0)
  55. ipa/ipa_v3/ipa_dp.c (+5140 -0)
  56. ipa/ipa_v3/ipa_dt_replacement.c (+874 -0)
  57. ipa/ipa_v3/ipa_emulation_stubs.h (+121 -0)
  58. ipa/ipa_v3/ipa_flt.c (+2137 -0)
  59. ipa/ipa_v3/ipa_hdr.c (+1377 -0)
  60. ipa/ipa_v3/ipa_hw_stats.c (+2288 -0)
  61. ipa/ipa_v3/ipa_i.h (+3269 -0)
  62. ipa/ipa_v3/ipa_interrupts.c (+612 -0)
  63. ipa/ipa_v3/ipa_intf.c (+810 -0)
  64. ipa/ipa_v3/ipa_mhi.c (+751 -0)
  65. ipa/ipa_v3/ipa_mhi_proxy.c (+1088 -0)
  66. ipa/ipa_v3/ipa_mhi_proxy.h (+49 -0)
  67. ipa/ipa_v3/ipa_mpm.c (+3286 -0)
  68. ipa/ipa_v3/ipa_nat.c (+2447 -0)
  69. ipa/ipa_v3/ipa_odl.c (+772 -0)
  70. ipa/ipa_v3/ipa_odl.h (+74 -0)
  71. ipa/ipa_v3/ipa_pm.c (+1431 -0)
  72. ipa/ipa_v3/ipa_pm.h (+181 -0)
  73. ipa/ipa_v3/ipa_qmi_service.c (+2262 -0)
  74. ipa/ipa_v3/ipa_qmi_service.h (+540 -0)
  75. ipa/ipa_v3/ipa_qmi_service_v01.c (+5110 -0)
  76. ipa/ipa_v3/ipa_rt.c (+2518 -0)
  77. ipa/ipa_v3/ipa_trace.h (+183 -0)
  78. ipa/ipa_v3/ipa_uc.c (+1589 -0)
  79. ipa/ipa_v3/ipa_uc_mhi.c (+957 -0)
  80. ipa/ipa_v3/ipa_uc_ntn.c (+635 -0)
  81. ipa/ipa_v3/ipa_uc_offload_i.h (+748 -0)
  82. ipa/ipa_v3/ipa_uc_wdi.c (+3092 -0)
  83. ipa/ipa_v3/ipa_utils.c (+9097 -0)
  84. ipa/ipa_v3/ipa_wdi3_i.c (+986 -0)
  85. ipa/ipa_v3/ipa_wigig_i.c (+1915 -0)
  86. ipa/ipa_v3/ipahal/ipahal.c (+1787 -0)
  87. ipa/ipa_v3/ipahal/ipahal.h (+701 -0)
  88. ipa/ipa_v3/ipahal/ipahal_fltrt.c (+4366 -0)
  89. ipa/ipa_v3/ipahal/ipahal_fltrt.h (+308 -0)
  90. ipa/ipa_v3/ipahal/ipahal_fltrt_i.h (+257 -0)
  91. ipa/ipa_v3/ipahal/ipahal_hw_stats.c (+634 -0)
  92. ipa/ipa_v3/ipahal/ipahal_hw_stats.h (+273 -0)
  93. ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h (+54 -0)
  94. ipa/ipa_v3/ipahal/ipahal_i.h (+815 -0)
  95. ipa/ipa_v3/ipahal/ipahal_nat.c (+510 -0)
  96. ipa/ipa_v3/ipahal/ipahal_nat.h (+103 -0)
  97. ipa/ipa_v3/ipahal/ipahal_nat_i.h (+146 -0)
  98. ipa/ipa_v3/ipahal/ipahal_reg.c (+4011 -0)
  99. ipa/ipa_v3/ipahal/ipahal_reg.h (+825 -0)
  100. ipa/ipa_v3/ipahal/ipahal_reg_i.h (+702 -0)

+ 4 - 2
Makefile

@@ -1,3 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -Wno-unused-function
-obj-y := dataipa.o
+#obj-$(CONFIG_GSI) += gsi/
+obj-n += gsi/
+#obj-$(CONFIG_IPA3) += ipa/
+obj-n += ipa/
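
With this change the techpack compiles nothing yet: Kbuild only descends into directories listed under obj-y or obj-m, so the obj-n entries keep gsi/ and ipa/ out of the build until the commented-out CONFIG_GSI / CONFIG_IPA3 lines are restored.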

+ 0 - 5
dataipa.c

@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-static void _dataipa_techpack_stub(void)
-{
-}

+ 9 - 0
gsi/Makefile

@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_GSI) += gsim.o
+
+gsim-objs := gsi.o
+
+gsim-$(CONFIG_DEBUG_FS) += gsi_dbg.o
+
+gsim-$(CONFIG_IPA_EMULATION) += gsi_emulation.o

+ 4596 - 0
gsi/gsi.c

@@ -0,0 +1,4596 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msm_gsi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include "gsi.h"
+#include "gsi_reg.h"
+#include "gsi_emulation.h"
+
+#define GSI_CMD_TIMEOUT (5*HZ)
+#define GSI_START_CMD_TIMEOUT_MS 1000
+#define GSI_CMD_POLL_CNT 5
+#define GSI_STOP_CMD_TIMEOUT_MS 200
+#define GSI_MAX_CH_LOW_WEIGHT 15
+
+#define GSI_STOP_CMD_POLL_CNT 4
+#define GSI_STOP_IN_PROC_CMD_POLL_CNT 2
+
+#define GSI_RESET_WA_MIN_SLEEP 1000
+#define GSI_RESET_WA_MAX_SLEEP 2000
+#define GSI_CHNL_STATE_MAX_RETRYCNT 10
+
+#define GSI_STTS_REG_BITS 32
+
+#ifndef CONFIG_DEBUG_FS
+void gsi_debugfs_init(void)
+{
+}
+#endif
+
+static const struct of_device_id msm_gsi_match[] = {
+	{ .compatible = "qcom,msm_gsi", },
+	{ },
+};
+
+
+#if defined(CONFIG_IPA_EMULATION)
+static bool running_emulation = true;
+#else
+static bool running_emulation;
+#endif
+
+struct gsi_ctx *gsi_ctx;
+
+static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+	GSIDBG("current IEO_IRQ_MSK: 0x%x, change to: 0x%x\n",
+		curr, ((curr & ~mask) | (val & mask)));
+}
+
+static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
+}
+
+static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
+}
+
+static void gsi_channel_state_change_wait(unsigned long chan_hdl,
+	struct gsi_chan_ctx *ctx,
+	uint32_t tm, enum gsi_ch_cmd_opcode op)
+{
+	int poll_cnt;
+	int gsi_pending_intr;
+	int res;
+	uint32_t type;
+	uint32_t val;
+	int ee = gsi_ctx->per.ee;
+	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
+	int stop_in_proc_retry = 0;
+	int stop_retry = 0;
+
+	/*
+	 * Poll the GSI channel for a total duration of
+	 * tm * GSI_CMD_POLL_CNT.
+	 * Polling the GSI state improves the debuggability
+	 * of the GSI HW state.
+	 */
+
+	for (poll_cnt = 0;
+		poll_cnt < GSI_CMD_POLL_CNT;
+		poll_cnt++) {
+		res = wait_for_completion_timeout(&ctx->compl,
+			msecs_to_jiffies(tm));
+
+		/* Interrupt received, return */
+		if (res != 0)
+			return;
+
+		type = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(gsi_ctx->per.ee));
+
+		gsi_pending_intr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
+
+		/* Update the channel state only if the interrupt was
+		 * raised on this particular channel and the global
+		 * interrupt for channel control is also raised.
+		 */
+		if ((type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) &&
+				((gsi_pending_intr >> chan_hdl) & 1)) {
+			/*
+			 * Check channel state here in case the channel is
+			 * already started but interrupt is not yet received.
+			 */
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
+					gsi_ctx->per.ee));
+			curr_state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+		}
+
+		if (op == GSI_CH_START) {
+			if (curr_state == GSI_CHAN_STATE_STARTED) {
+				ctx->state = curr_state;
+				return;
+			}
+		}
+
+		if (op == GSI_CH_STOP) {
+			if (curr_state == GSI_CHAN_STATE_STOPPED)
+				stop_retry++;
+			else if (curr_state == GSI_CHAN_STATE_STOP_IN_PROC)
+				stop_in_proc_retry++;
+		}
+
+		/* If the first STOPPED/STOP_IN_PROC indication arrives only
+		 * after the poll count maxed out, reset it so the loop can
+		 * still reach the max stop and stop-in-proc counts.
+		 */
+		if (stop_retry == 1 || stop_in_proc_retry == 1)
+			poll_cnt = 0;
+
+		/* If the stop retry count reached its maximum, clear the
+		 * pending interrupt since the channel is already stopped.
+		 */
+		if (stop_retry == GSI_STOP_CMD_POLL_CNT) {
+			gsi_writel(gsi_pending_intr, gsi_ctx->base +
+				GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
+			ctx->state = curr_state;
+			return;
+		}
+
+		/* If the channel is still in the stop-in-progress state,
+		 * there is no need to keep waiting.
+		 */
+		if (stop_in_proc_retry == GSI_STOP_IN_PROC_CMD_POLL_CNT) {
+			ctx->state = curr_state;
+			return;
+		}
+
+		GSIDBG("GSI wait on chan_hld=%lu irqtyp=%u state=%u intr=%u\n",
+			chan_hdl,
+			type,
+			ctx->state,
+			gsi_pending_intr);
+	}
+
+	GSIDBG("invalidating the channel state when timeout happens\n");
+	ctx->state = curr_state;
+}
+
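
Spelling out the wait budget implied by the constants at the top of the file (a reading of the loop above, not additional driver code):

	/*
	 * Wait budget of gsi_channel_state_change_wait(): the normal path
	 * waits up to GSI_CMD_POLL_CNT * tm = 5 * tm ms.  For GSI_CH_STOP,
	 * the first STOPPED or STOP_IN_PROC sighting resets poll_cnt, and
	 * the loop then exits after GSI_STOP_CMD_POLL_CNT (4) STOPPED
	 * sightings or GSI_STOP_IN_PROC_CMD_POLL_CNT (2) STOP_IN_PROC
	 * sightings, whichever comes first.
	 */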
+static void gsi_handle_ch_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
+	gsi_writel(ch, gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
+	GSIDBG("ch %x\n", ch);
+	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
+		if ((1 << i) & ch) {
+			if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
+				GSIERR("invalid channel %d\n", i);
+				break;
+			}
+
+			ctx = &gsi_ctx->chan[i];
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
+			ctx->state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("ch %u state updated to %u\n", i, ctx->state);
+			complete(&ctx->compl);
+			gsi_ctx->ch_dbg[i].cmd_completed++;
+		}
+	}
+}
+
+static void gsi_handle_ev_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
+	gsi_writel(ch, gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
+	GSIDBG("ev %x\n", ch);
+	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
+		if ((1 << i) & ch) {
+			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+				GSIERR("invalid event %d\n", i);
+				break;
+			}
+
+			ctx = &gsi_ctx->evtr[i];
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
+			ctx->state = (val &
+				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("evt %u state updated to %u\n", i, ctx->state);
+			complete(&ctx->compl);
+		}
+	}
+}
+
+static void gsi_handle_glob_err(uint32_t err)
+{
+	struct gsi_log_err *log;
+	struct gsi_chan_ctx *ch;
+	struct gsi_evt_ctx *ev;
+	struct gsi_chan_err_notify chan_notify;
+	struct gsi_evt_err_notify evt_notify;
+	struct gsi_per_notify per_notify;
+	uint32_t val;
+	enum gsi_err_type err_type;
+
+	log = (struct gsi_log_err *)&err;
+	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
+			log->virt_idx);
+	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
+			log->arg2, log->arg3);
+
+	err_type = log->err_type;
+	/*
+	 * These are errors thrown by hardware. We need
+	 * BUG_ON() to capture the hardware state right
+	 * when it is unexpected.
+	 */
+	switch (err_type) {
+	case GSI_ERR_TYPE_GLOB:
+		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
+		per_notify.user_data = gsi_ctx->per.user_data;
+		per_notify.data.err_desc = err & 0xFFFF;
+		gsi_ctx->per.notify_cb(&per_notify);
+		break;
+	case GSI_ERR_TYPE_CHAN:
+		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ch)) {
+			GSIERR("Unexpected ch %d\n", log->virt_idx);
+			return;
+		}
+
+		ch = &gsi_ctx->chan[log->virt_idx];
+		chan_notify.chan_user_data = ch->props.chan_user_data;
+		chan_notify.err_desc = err & 0xFFFF;
+		if (log->code == GSI_INVALID_TRE_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
+					gsi_ctx->per.ee));
+			ch->state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
+					ch->state);
+			ch->stats.invalid_tre_error++;
+			if (ch->state == GSI_CHAN_STATE_ERROR) {
+				GSIERR("Unexpected channel state %d\n",
+					ch->state);
+				GSI_ASSERT();
+			}
+			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
+		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
+		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
+			complete(&ch->compl);
+		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
+			chan_notify.evt_id =
+				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
+		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			chan_notify.evt_id =
+				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
+		} else if (log->code == GSI_HWO_1_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
+		} else {
+			GSIERR("unexpected event log code %d\n", log->code);
+			GSI_ASSERT();
+		}
+		ch->props.err_cb(&chan_notify);
+		break;
+	case GSI_ERR_TYPE_EVT:
+		if (WARN_ON(log->virt_idx >= gsi_ctx->max_ev)) {
+			GSIERR("Unexpected ev %d\n", log->virt_idx);
+			return;
+		}
+
+		ev = &gsi_ctx->evtr[log->virt_idx];
+		evt_notify.user_data = ev->props.user_data;
+		evt_notify.err_desc = err & 0xFFFF;
+		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
+		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
+			complete(&ev->compl);
+		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
+			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
+		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
+			if (log->ee != gsi_ctx->per.ee) {
+				GSIERR("unexpected EE in event %d\n", log->ee);
+				GSI_ASSERT();
+			}
+			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
+		} else {
+			GSIERR("unexpected event log code %d\n", log->code);
+			GSI_ASSERT();
+		}
+		ev->props.err_cb(&evt_notify);
+		break;
+	}
+}
+
+static void gsi_handle_gp_int1(void)
+{
+	complete(&gsi_ctx->gen_ee_cmd_compl);
+}
+
+static void gsi_handle_glob_ee(int ee)
+{
+	uint32_t val;
+	uint32_t err;
+	struct gsi_per_notify notify;
+	uint32_t clr = ~0;
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));
+
+	notify.user_data = gsi_ctx->per.user_data;
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
+		err = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_OFFS(ee));
+		if (gsi_ctx->per.ver >= GSI_VER_1_2)
+			gsi_writel(0, gsi_ctx->base +
+				GSI_EE_n_ERROR_LOG_OFFS(ee));
+		gsi_writel(clr, gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
+		gsi_handle_glob_err(err);
+	}
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK)
+		gsi_handle_gp_int1();
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
+		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
+		gsi_ctx->per.notify_cb(&notify);
+	}
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
+		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
+		gsi_ctx->per.notify_cb(&notify);
+	}
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
+{
+	ctx->wp_local += ctx->elem_sz;
+	if (ctx->wp_local == ctx->end)
+		ctx->wp_local = ctx->base;
+}
+
+static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
+{
+	ctx->rp_local += ctx->elem_sz;
+	if (ctx->rp_local == ctx->end)
+		ctx->rp_local = ctx->base;
+}
+
+uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
+{
+	WARN_ON(addr < ctx->base || addr >= ctx->end);
+	return (uint32_t)(addr - ctx->base) / ctx->elem_sz;
+}
+
+static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
+		uint64_t addr2)
+{
+	uint32_t addr_diff;
+
+	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
+		ctx->base, ctx->end);
+
+	if (addr1 < ctx->base || addr1 >= ctx->end) {
+		GSIERR("address = 0x%llx not in range\n", addr1);
+		GSI_ASSERT();
+	}
+
+	if (addr2 < ctx->base || addr2 >= ctx->end) {
+		GSIERR("address = 0x%llx not in range\n", addr2);
+		GSI_ASSERT();
+	}
+
+	addr_diff = (uint32_t)(addr2 - addr1);
+	if (addr1 < addr2)
+		return addr_diff / ctx->elem_sz;
+	else
+		return (addr_diff + ctx->len) / ctx->elem_sz;
+}
+
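
The unsigned arithmetic in gsi_get_complete_num() is worth unpacking: when addr2 has wrapped past the ring end, the 32-bit difference underflows, and adding ctx->len brings it back to the true element distance. A minimal standalone sketch with assumed values (a toy ring at base 0x1000 with eight 16-byte elements):

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as gsi_get_complete_num(), on an assumed toy
	 * ring: base 0x1000, len 0x80 (eight 16-byte elements).
	 */
	static uint16_t complete_num(uint32_t len, uint32_t elem_sz,
			uint64_t addr1, uint64_t addr2)
	{
		uint32_t diff = (uint32_t)(addr2 - addr1); /* may underflow */

		if (addr1 < addr2)
			return diff / elem_sz;
		return (diff + len) / elem_sz; /* unwraps across ring end */
	}

	int main(void)
	{
		/* rp_local at element 6, new rp wrapped to element 2:
		 * elements 6, 7, 0, 1 completed => prints 4.
		 */
		printf("%u\n", (unsigned int)complete_num(0x80, 16,
				0x1000 + 6 * 16, 0x1000 + 2 * 16));
		return 0;
	}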
+static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
+		struct gsi_chan_xfer_notify *notify, bool callback)
+{
+	uint32_t ch_id;
+	struct gsi_chan_ctx *ch_ctx;
+	uint16_t rp_idx;
+	uint64_t rp;
+
+	ch_id = evt->chid;
+	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
+		GSIERR("Unexpected ch %d\n", ch_id);
+		return;
+	}
+
+	ch_ctx = &gsi_ctx->chan[ch_id];
+	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
+		return;
+
+	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
+		rp = evt->xfer_ptr;
+
+		if (ch_ctx->ring.rp_local != rp) {
+			ch_ctx->stats.completed +=
+				gsi_get_complete_num(&ch_ctx->ring,
+				ch_ctx->ring.rp_local, rp);
+			ch_ctx->ring.rp_local = rp;
+		}
+
+
+		/* the element at RP is also processed */
+		gsi_incr_ring_rp(&ch_ctx->ring);
+
+		ch_ctx->ring.rp = ch_ctx->ring.rp_local;
+		rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
+		notify->veid = GSI_VEID_DEFAULT;
+	} else {
+		rp_idx = evt->cookie;
+		notify->veid = evt->veid;
+	}
+
+	ch_ctx->stats.completed++;
+
+	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
+	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
+	ch_ctx->user_data[rp_idx].valid = false;
+
+	notify->chan_user_data = ch_ctx->props.chan_user_data;
+	notify->evt_id = evt->code;
+	notify->bytes_xfered = evt->len;
+
+	if (callback) {
+		if (atomic_read(&ch_ctx->poll_mode)) {
+			GSIERR("Calling client callback in polling mode\n");
+			WARN_ON(1);
+		}
+		ch_ctx->props.xfer_cb(notify);
+	}
+}
+
+static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
+		struct gsi_chan_xfer_notify *notify, bool callback)
+{
+	struct gsi_xfer_compl_evt *evt;
+
+	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
+			ctx->ring.rp_local - ctx->ring.base);
+	gsi_process_chan(evt, notify, callback);
+	gsi_incr_ring_rp(&ctx->ring);
+	/* recycle this element */
+	gsi_incr_ring_wp(&ctx->ring);
+	ctx->stats.completed++;
+}
+
+static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
+{
+	uint32_t val;
+
+	ctx->ring.wp = ctx->ring.wp_local;
+	val = (ctx->ring.wp_local &
+			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
+			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
+				gsi_ctx->per.ee));
+}
+
+static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
+{
+	uint32_t val;
+
+	/*
+	 * Allocate new events for this channel first
+	 * before submitting the new TREs.
+	 * For TO_GSI channels the event ring doorbell is rung as part of
+	 * the interrupt handling.
+	 */
+	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		gsi_ring_evt_doorbell(ctx->evtr);
+	ctx->ring.wp = ctx->ring.wp_local;
+
+	val = (ctx->ring.wp_local &
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
+				gsi_ctx->per.ee));
+}
+
+static void gsi_handle_ieob(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint64_t rp;
+	struct gsi_evt_ctx *ctx;
+	struct gsi_chan_xfer_notify notify;
+	unsigned long flags;
+	unsigned long cntr;
+	uint32_t msk;
+	bool empty;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
+	msk = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+	gsi_writel(ch & msk, gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
+
+	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
+		if ((1 << i) & ch & msk) {
+			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+				GSIERR("invalid event %d\n", i);
+				break;
+			}
+			ctx = &gsi_ctx->evtr[i];
+
+			/*
+			 * Don't handle MSI interrupts, only handle IEOB
+			 * IRQs
+			 */
+			if (ctx->props.intr == GSI_INTR_MSI)
+				continue;
+
+			if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
+				GSIERR("Unexpected irq intf %d\n",
+					ctx->props.intf);
+				GSI_ASSERT();
+			}
+			spin_lock_irqsave(&ctx->ring.slock, flags);
+check_again:
+			cntr = 0;
+			empty = true;
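+			/* CNTXT_4 exposes only the low 32 bits of the event
+			 * ring RP; the next two lines merge them with the
+			 * high 32 bits of the last known value.
+			 */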
+			rp = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
+			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
+			ctx->ring.rp = rp;
+			while (ctx->ring.rp_local != rp) {
+				++cntr;
+				if (ctx->props.exclusive &&
+					atomic_read(&ctx->chan->poll_mode)) {
+					cntr = 0;
+					break;
+				}
+				gsi_process_evt_re(ctx, &notify, true);
+				empty = false;
+			}
+			if (!empty)
+				gsi_ring_evt_doorbell(ctx);
+			if (cntr != 0)
+				goto check_again;
+			spin_unlock_irqrestore(&ctx->ring.slock, flags);
+		}
+	}
+}
+
+static void gsi_handle_inter_ee_ch_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
+	gsi_writel(ch, gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
+	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
+		if ((1 << i) & ch) {
+			/* not currently expected */
+			GSIERR("ch %u was inter-EE changed\n", i);
+		}
+	}
+}
+
+static void gsi_handle_inter_ee_ev_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
+	gsi_writel(ch, gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
+	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
+		if ((1 << i) & ch) {
+			/* not currently expected */
+			GSIERR("evt %u was inter-EE changed\n", i);
+		}
+	}
+}
+
+static void gsi_handle_general(int ee)
+{
+	uint32_t val;
+	struct gsi_per_notify notify;
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));
+
+	notify.user_data = gsi_ctx->per.user_data;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;
+
+	if (gsi_ctx->per.notify_cb)
+		gsi_ctx->per.notify_cb(&notify);
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
+}
+
+#define GSI_ISR_MAX_ITER 50
+
+static void gsi_handle_irq(void)
+{
+	uint32_t type;
+	int ee = gsi_ctx->per.ee;
+	unsigned long cnt = 0;
+
+	while (1) {
+		type = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));
+
+		if (!type)
+			break;
+
+		GSIDBG_LOW("type 0x%x\n", type);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
+			gsi_handle_ch_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
+			gsi_handle_ev_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
+			gsi_handle_glob_ee(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
+			gsi_handle_ieob(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
+			gsi_handle_inter_ee_ch_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
+			gsi_handle_inter_ee_ev_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
+			gsi_handle_general(ee);
+
+		if (++cnt > GSI_ISR_MAX_ITER) {
+			/*
+			 * Exceeded the max number of spurious interrupts
+			 * allowed from hardware. Unexpected hardware state.
+			 */
+			GSIERR("Too many spurious interrupts from GSI HW\n");
+			GSI_ASSERT();
+		}
+
+	}
+}
+
+static irqreturn_t gsi_isr(int irq, void *ctxt)
+{
+	if (gsi_ctx->per.req_clk_cb) {
+		bool granted = false;
+
+		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
+		if (granted) {
+			gsi_handle_irq();
+			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
+		}
+	} else {
+		if (!gsi_ctx->per.clk_status_cb())
+			return IRQ_HANDLED;
+		gsi_handle_irq();
+	}
+	return IRQ_HANDLED;
+}
+
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+	uint32_t reg = 0;
+
+	switch (ver) {
+	case GSI_VER_ERR:
+	case GSI_VER_MAX:
+		GSIERR("GSI version is not supported %d\n", ver);
+		WARN_ON(1);
+		break;
+	case GSI_VER_1_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_3:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_5:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_7:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_9:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	}
+
+	GSIDBG("max channels %d\n", reg);
+
+	return reg;
+}
+
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+	uint32_t reg = 0;
+
+	switch (ver) {
+	case GSI_VER_ERR:
+	case GSI_VER_MAX:
+		GSIERR("GSI version is not supported %d\n", ver);
+		WARN_ON(1);
+		break;
+	case GSI_VER_1_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_3:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_5:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_7:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	case GSI_VER_2_9:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	}
+
+	GSIDBG("max event rings %d\n", reg);
+
+	return reg;
+}
+int gsi_complete_clk_grant(unsigned long dev_hdl)
+{
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	gsi_handle_irq();
+	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_complete_clk_grant);
+
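
gsi_isr() together with gsi_complete_clk_grant() defines a deferred-clock contract: when req_clk_cb() cannot grant clocks synchronously, the ISR returns without touching GSI registers, and the client is expected to call gsi_complete_clk_grant() once its asynchronous clock vote lands. A hypothetical client-side callback pair showing the expected shape (the client_* names are illustrative, not part of the IPA driver):

	/* Hypothetical client side of the deferred-clock contract. */
	static void client_req_clk_cb(void *user_data, bool *granted)
	{
		struct client_ctx *ctx = user_data;

		/* Non-blocking clock vote; on failure, queue an async vote
		 * whose completion handler calls gsi_complete_clk_grant().
		 */
		*granted = client_try_clk_vote(ctx);
		if (!*granted)
			client_schedule_clk_vote(ctx);
	}

	static void client_rel_clk_cb(void *user_data)
	{
		client_unvote_clk((struct client_ctx *)user_data);
	}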
+int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	gsi_ctx->base = devm_ioremap_nocache(
+		gsi_ctx->dev, gsi_base_addr, gsi_size);
+
+	if (!gsi_ctx->base) {
+		GSIERR("failed to map access to GSI HW\n");
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%x)\n",
+		&gsi_base_addr,
+		gsi_ctx->base,
+		gsi_size);
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_map_base);
+
+int gsi_unmap_base(void)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->base) {
+		GSIERR("access to GSI HW has not been mapped\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+
+	gsi_ctx->base = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_unmap_base);
+
+int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
+{
+	int res;
+	uint32_t val;
+	int needed_reg_ver;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !dev_hdl) {
+		GSIERR("bad params props=%pK dev_hdl=%pK\n", props, dev_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+		GSIERR("bad params gsi_ver=%d\n", props->ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->notify_cb) {
+		GSIERR("notify callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->req_clk_cb && !props->rel_clk_cb) {
+		GSIERR("rel callback  must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->per_registered) {
+		GSIERR("per already registered\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	switch (props->ver) {
+	case GSI_VER_1_0:
+	case GSI_VER_1_2:
+	case GSI_VER_1_3:
+	case GSI_VER_2_0:
+	case GSI_VER_2_2:
+		needed_reg_ver = GSI_REGISTER_VER_1;
+		break;
+	case GSI_VER_2_5:
+	case GSI_VER_2_7:
+	case GSI_VER_2_9:
+		needed_reg_ver = GSI_REGISTER_VER_2;
+		break;
+	case GSI_VER_ERR:
+	case GSI_VER_MAX:
+	default:
+		GSIERR("GSI version is not supported %d\n", props->ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (needed_reg_ver != GSI_REGISTER_VER_CURRENT) {
+		GSIERR("Invalid register version. current=%d, needed=%d\n",
+			GSI_REGISTER_VER_CURRENT, needed_reg_ver);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+	GSIDBG("gsi ver %d register ver %d needed register ver %d\n",
+		props->ver, GSI_REGISTER_VER_CURRENT, needed_reg_ver);
+
+	spin_lock_init(&gsi_ctx->slock);
+	if (props->intr == GSI_INTR_IRQ) {
+		if (!props->irq) {
+			GSIERR("bad irq specified %u\n", props->irq);
+			return -GSI_STATUS_INVALID_PARAMS;
+		}
+		/*
+		 * On a real UE, there are two separate interrupt
+		 * vectors that get directed toward the GSI/IPA
+		 * drivers.  They are handled by gsi_isr() and
+		 * (ipa_isr() or ipa3_isr()) respectively.  In the
+		 * emulation environment, this is not the case;
+		 * instead, interrupt vectors are routed to the
+		 * emulation hardware's interrupt controller, which
+		 * in turn, forwards a single interrupt to the GSI/IPA
+		 * driver.  When the new interrupt vector is received,
+		 * the driver needs to probe the interrupt
+		 * controller's registers to see if one, the other, or
+		 * both interrupts have occurred.  Given the above, we
+		 * now need to handle both situations, namely: the
+		 * emulator's and the real UE.
+		 */
+		if (running_emulation) {
+			/*
+			 * New scheme involving the emulator's
+			 * interrupt controller.
+			 */
+			res = devm_request_threaded_irq(
+				gsi_ctx->dev,
+				props->irq,
+				/* top half handler to follow */
+				emulator_hard_irq_isr,
+				/* threaded bottom half handler to follow */
+				emulator_soft_irq_isr,
+				IRQF_SHARED,
+				"emulator_intcntrlr",
+				gsi_ctx);
+		} else {
+			/*
+			 * Traditional scheme used on the real UE.
+			 */
+			res = devm_request_irq(gsi_ctx->dev, props->irq,
+				gsi_isr,
+				props->req_clk_cb ? IRQF_TRIGGER_RISING :
+					IRQF_TRIGGER_HIGH,
+				"gsi",
+				gsi_ctx);
+		}
+		if (res) {
+			GSIERR(
+			 "failed to register isr for %u\n",
+			 props->irq);
+			return -GSI_STATUS_ERROR;
+		}
+		GSIDBG(
+			"succeeded to register isr for %u\n",
+			props->irq);
+
+		res = enable_irq_wake(props->irq);
+		if (res)
+			GSIERR("failed to enable wake irq %u\n", props->irq);
+		else
+			GSIERR("GSI irq is wake enabled %u\n", props->irq);
+
+	} else {
+		GSIERR("do not support interrupt type %u\n", props->intr);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/*
+	 * If base not previously mapped via gsi_map_base(), map it
+	 * now...
+	 */
+	if (!gsi_ctx->base) {
+		res = gsi_map_base(props->phys_addr, props->size);
+		if (res)
+			return res;
+	}
+
+	if (running_emulation) {
+		GSIDBG("GSI SW ver register value 0x%x\n",
+		       gsi_readl(gsi_ctx->base +
+		       GSI_EE_n_GSI_SW_VERSION_OFFS(0)));
+		gsi_ctx->intcntrlr_mem_size =
+		    props->emulator_intcntrlr_size;
+		gsi_ctx->intcntrlr_base =
+		    devm_ioremap_nocache(
+			gsi_ctx->dev,
+			props->emulator_intcntrlr_addr,
+			props->emulator_intcntrlr_size);
+		if (!gsi_ctx->intcntrlr_base) {
+			GSIERR(
+			  "failed to remap emulator's interrupt controller HW\n");
+			gsi_unmap_base();
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+
+		GSIDBG(
+		    "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n",
+		    &(props->emulator_intcntrlr_addr),
+		    gsi_ctx->intcntrlr_base,
+		    props->emulator_intcntrlr_size);
+
+		gsi_ctx->intcntrlr_gsi_isr = gsi_isr;
+		gsi_ctx->intcntrlr_client_isr =
+		    props->emulator_intcntrlr_client_isr;
+	}
+
+	gsi_ctx->per = *props;
+	gsi_ctx->per_registered = true;
+	mutex_init(&gsi_ctx->mlock);
+	atomic_set(&gsi_ctx->num_chan, 0);
+	atomic_set(&gsi_ctx->num_evt_ring, 0);
+	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+	if (gsi_ctx->max_ch == 0) {
+		gsi_unmap_base();
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		GSIERR("failed to get max channels\n");
+		return -GSI_STATUS_ERROR;
+	}
+	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+	if (gsi_ctx->max_ev == 0) {
+		gsi_unmap_base();
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		GSIERR("failed to get max event rings\n");
+		return -GSI_STATUS_ERROR;
+	}
+
+	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
+		GSIERR("max event rings are beyond absolute maximum\n");
+		return -GSI_STATUS_ERROR;
+	}
+
+	if (props->mhi_er_id_limits_valid &&
+	    props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
+		gsi_unmap_base();
+		if (running_emulation)
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		GSIERR("MHI event ring start id %u is beyond max %u\n",
+			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
+		return -GSI_STATUS_ERROR;
+	}
+
+	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+
+	/* exclude reserved mhi events */
+	if (props->mhi_er_id_limits_valid)
+		gsi_ctx->evt_bmap |=
+			((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
+			((1 << (props->mhi_er_id_limits[0])) - 1);
+
+	/*
+	 * Enable all interrupts but GSI_BREAK_POINT.
+	 * Inter-EE commands / interrupts are not supported.
+	 */
+	__gsi_config_type_irq(props->ee, ~0, ~0);
+	__gsi_config_ch_irq(props->ee, ~0, ~0);
+	__gsi_config_evt_irq(props->ee, ~0, ~0);
+	__gsi_config_ieob_irq(props->ee, ~0, ~0);
+	__gsi_config_glob_irq(props->ee, ~0, ~0);
+	__gsi_config_gen_irq(props->ee, ~0,
+		~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
+
+	gsi_writel(props->intr, gsi_ctx->base +
+			GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
+	/* set GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB/MSB to 0 */
+	if ((gsi_ctx->per.ver >= GSI_VER_2_0) &&
+		(props->intr != GSI_INTR_MSI)) {
+		gsi_writel(0, gsi_ctx->base +
+			GSI_EE_n_CNTXT_MSI_BASE_LSB(gsi_ctx->per.ee));
+		gsi_writel(0, gsi_ctx->base +
+			GSI_EE_n_CNTXT_MSI_BASE_MSB(gsi_ctx->per.ee));
+	}
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
+	if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
+		gsi_ctx->enabled = true;
+	else
+		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+
+	if (gsi_ctx->per.ver >= GSI_VER_1_2)
+		gsi_writel(0, gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
+	if (running_emulation) {
+		/*
+		 * Set up the emulator's interrupt controller...
+		 */
+		res = setup_emulator_cntrlr(
+		    gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
+		if (res != 0) {
+			gsi_unmap_base();
+			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+			GSIERR("setup_emulator_cntrlr() failed\n");
+			return res;
+		}
+	}
+
+	*dev_hdl = (uintptr_t)gsi_ctx;
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_register_device);
+
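
The evt_bmap initialization inside gsi_register_device() packs two reservations into one word: bits at and above max_ev are pre-set so nonexistent rings can never be allocated, and the XOR term marks the inclusive MHI range [mhi_er_id_limits[0], mhi_er_id_limits[1]] as taken. A small sketch with assumed values (max_ev = 16, MHI limits {10, 11}):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long max_ev = 16;	/* assumed */
		unsigned long lo = 10, hi = 11;	/* assumed MHI limits */
		unsigned long bmap;

		bmap = ~((1UL << max_ev) - 1);	/* bits >= 16 busy */
		bmap |= ((1UL << (hi + 1)) - 1) ^	/* bits 0..11 ... */
			((1UL << lo) - 1);	/* ... minus bits 0..9 */

		/* On a 64-bit long this prints 0xffffffffffff0c00:
		 * bits 10, 11 and everything >= 16 set.
		 */
		printf("evt_bmap = 0x%lx\n", bmap);
		return 0;
	}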
+int gsi_write_device_scratch(unsigned long dev_hdl,
+		struct gsi_device_scratch *val)
+{
+	unsigned int max_usb_pkt_size = 0;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (val->max_usb_pkt_size_valid &&
+			val->max_usb_pkt_size != 1024 &&
+			val->max_usb_pkt_size != 512 &&
+			val->max_usb_pkt_size != 64) {
+		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
+				val->max_usb_pkt_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	if (val->mhi_base_chan_idx_valid)
+		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
+			val->mhi_base_chan_idx;
+
+	if (val->max_usb_pkt_size_valid) {
+		max_usb_pkt_size = 2;
+		if (val->max_usb_pkt_size > 64)
+			max_usb_pkt_size =
+				(val->max_usb_pkt_size == 1024) ? 1 : 0;
+		gsi_ctx->scratch.word0.s.max_usb_pkt_size = max_usb_pkt_size;
+	}
+
+	gsi_writel(gsi_ctx->scratch.word0.val,
+			gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_device_scratch);
+
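
The scratch update above folds the three legal USB packet sizes into a 2-bit field. A sketch of the resulting mapping (the helper name is illustrative):

	/* Mapping written to scratch.word0.s.max_usb_pkt_size by the code
	 * above: 64 bytes -> 2, 512 bytes -> 0, 1024 bytes -> 1 (only
	 * these three sizes pass validation).
	 */
	static unsigned int encode_max_usb_pkt_size(unsigned int sz)
	{
		if (sz == 64)
			return 2;
		return (sz == 1024) ? 1 : 0;
	}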
+int gsi_deregister_device(unsigned long dev_hdl, bool force)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!force && atomic_read(&gsi_ctx->num_chan)) {
+		GSIERR("cannot deregister %u channels are still connected\n",
+				atomic_read(&gsi_ctx->num_chan));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
+		GSIERR("cannot deregister %u events are still connected\n",
+				atomic_read(&gsi_ctx->num_evt_ring));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* disable all interrupts */
+	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
+
+	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
+	gsi_unmap_base();
+	memset(gsi_ctx, 0, sizeof(*gsi_ctx));
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_deregister_device);
+
+static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
+		uint8_t evt_id, unsigned int ee)
+{
+	uint32_t val;
+
+	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
+			props->re_size);
+
+	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
+			GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
+		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
+			GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
+		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
+			& GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));
+
+	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
+		val = (props->ring_len &
+			GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
+			<< GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
+		gsi_writel(val, gsi_ctx->base +
+				GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
+	} else {
+		val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK)
+			 << GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
+		gsi_writel(val, gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
+	}
+
+	val = (props->ring_base_addr &
+			GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));
+
+	val = ((props->ring_base_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));
+
+	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
+		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
+		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
+		 GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));
+
+	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));
+
+	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));
+
+	val = ((props->msi_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));
+
+	val = (props->rp_update_addr &
+		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));
+
+	val = ((props->rp_update_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
+}
+
+static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
+		struct gsi_ring_ctx *ctx)
+{
+	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
+	ctx->base = props->ring_base_addr;
+	ctx->wp = ctx->base;
+	ctx->rp = ctx->base;
+	ctx->wp_local = ctx->base;
+	ctx->rp_local = ctx->base;
+	ctx->len = props->ring_len;
+	ctx->elem_sz = props->re_size;
+	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
+	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
+}
+
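
For concreteness, the geometry computed by gsi_init_evt_ring() for an assumed 4 KB ring of 16-byte elements:

	/* Assumed example: ring_len 4096, re_size 16
	 *   elem_sz      = 16
	 *   max_num_elem = 4096 / 16 - 1 = 255
	 *   end          = base + 256 * 16 = base + 4096
	 * One slot is left out of max_num_elem, the usual ring convention
	 * that keeps a completely full ring distinguishable from an empty
	 * one (assumed to be the intent here).
	 */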
+static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
+{
+	unsigned long flags;
+	uint32_t val;
+
+	spin_lock_irqsave(&ctx->ring.slock, flags);
+	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
+	ctx->ring.wp_local = ctx->ring.base +
+		ctx->ring.max_num_elem * ctx->ring.elem_sz;
+
+	/* write order MUST be MSB followed by LSB */
+	val = ((ctx->ring.wp_local >> 32) &
+		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
+		gsi_ctx->per.ee));
+
+	gsi_ring_evt_doorbell(ctx);
+	spin_unlock_irqrestore(&ctx->ring.slock, flags);
+}
+
+static void gsi_prime_evt_ring_wdi(struct gsi_evt_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->ring.slock, flags);
+	if (ctx->ring.base_va)
+		memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
+	ctx->ring.wp_local = ctx->ring.base +
+		((ctx->ring.max_num_elem + 2) * ctx->ring.elem_sz);
+	gsi_ring_evt_doorbell(ctx);
+	spin_unlock_irqrestore(&ctx->ring.slock, flags);
+}
+
+static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
+{
+	uint64_t ra;
+
+	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
+				props->ring_len % 4) ||
+			(props->re_size == GSI_EVT_RING_RE_SIZE_8B &&
+				 props->ring_len % 8) ||
+			(props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
+				 props->ring_len % 16)) {
+		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
+				props->ring_len, props->re_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ra = props->ring_base_addr;
+	do_div(ra, roundup_pow_of_two(props->ring_len));
+
+	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
+		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
+				props->ring_base_addr,
+				roundup_pow_of_two(props->ring_len));
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
+			!props->ring_base_vaddr) {
+		GSIERR("protocol %u requires ring base VA\n", props->intf);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
+			(!props->evchid_valid ||
+			props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
+			props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
+		GSIERR("MHI requires evchid valid=%d val=%u\n",
+				props->evchid_valid, props->evchid);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
+			props->evchid_valid) {
+		GSIERR("protocol %u cannot specify evchid\n", props->intf);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->err_cb) {
+		GSIERR("err callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	return GSI_STATUS_SUCCESS;
+}
+
+/**
+ * gsi_cleanup_xfer_user_data: clean up the user data array using a callback
+ *	passed by the IPA driver. This needs to be done in GSI since only GSI
+ *	knows which TREs are in use. However, IPA is the one that does the
+ *	cleaning, therefore we pass a callback from IPA and call it using
+ *	params from GSI.
+ *
+ * @chan_hdl: hdl of the gsi channel user data array to be cleaned
+ * @cleanup_cb: callback used to clean the user data array. takes 2 inputs
+ *	@chan_user_data: ipa_sys_context of the gsi_channel
+ *	@xfer_user_data: user data array element (rx_pkt wrapper)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
+	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
+{
+	struct gsi_chan_ctx *ctx;
+	uint64_t i;
+	uint16_t rp_idx;
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* for coalescing, traverse the whole array */
+	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+		size_t user_data_size =
+			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
+		for (i = 0; i < user_data_size; i++) {
+			if (ctx->user_data[i].valid)
+				cleanup_cb(ctx->props.chan_user_data,
+					ctx->user_data[i].p);
+		}
+	} else {
+		/* for non-coalescing, clean between RP and WP */
+		while (ctx->ring.rp_local != ctx->ring.wp_local) {
+			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
+				ctx->ring.rp_local);
+			WARN_ON(!ctx->user_data[rp_idx].valid);
+			cleanup_cb(ctx->props.chan_user_data,
+				ctx->user_data[rp_idx].p);
+			gsi_incr_ring_rp(&ctx->ring);
+		}
+	}
+	return 0;
+}
+
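
A hypothetical cleanup_cb matching the contract in the kernel-doc above (ipa_sys_context and rx_pkt come from that doc; the types and free helper here are illustrative):

	/* Illustrative callback: chan_user_data is the channel's context,
	 * xfer_user_data one queued descriptor wrapper to be freed.
	 */
	static void client_xfer_cleanup_cb(void *chan_user_data,
			void *xfer_user_data)
	{
		struct client_sys_ctx *sys = chan_user_data;
		struct client_rx_pkt *pkt = xfer_user_data;

		client_free_rx_pkt(sys, pkt);	/* hypothetical helper */
	}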
+int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
+		unsigned long *evt_ring_hdl)
+{
+	unsigned long evt_id;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+	int res;
+	int ee;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params props=%pK dev_hdl=0x%lx evt_ring_hdl=%pK\n",
+				props, dev_hdl, evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_validate_evt_ring_props(props)) {
+		GSIERR("invalid params\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->evchid_valid) {
+		mutex_lock(&gsi_ctx->mlock);
+		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
+				sizeof(unsigned long) * BITS_PER_BYTE);
+		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
+			GSIERR("failed to alloc event ID\n");
+			mutex_unlock(&gsi_ctx->mlock);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+		set_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+	} else {
+		evt_id = props->evchid;
+	}
+	GSIDBG("Using %lu as virt evt id\n", evt_id);
+
+	ctx = &gsi_ctx->evtr[evt_id];
+	memset(ctx, 0, sizeof(*ctx));
+	mutex_init(&ctx->mlock);
+	init_completion(&ctx->compl);
+	atomic_set(&ctx->chan_ref_cnt, 0);
+	ctx->props = *props;
+
+	mutex_lock(&gsi_ctx->mlock);
+	val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	ee = gsi_ctx->per.ee;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_id);
+		if (!props->evchid_valid)
+			clear_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("evt_id=%lu allocation failed state=%u\n",
+				evt_id, ctx->state);
+		if (!props->evchid_valid)
+			clear_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);
+
+	spin_lock_init(&ctx->ring.slock);
+	gsi_init_evt_ring(props, &ctx->ring);
+
+	ctx->id = evt_id;
+	*evt_ring_hdl = evt_id;
+	atomic_inc(&gsi_ctx->num_evt_ring);
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
+		gsi_prime_evt_ring(ctx);
+	else if (props->intf == GSI_EVT_CHTYPE_WDI2_EV)
+		gsi_prime_evt_ring_wdi(ctx);
+	mutex_unlock(&gsi_ctx->mlock);
+
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	gsi_writel(1 << evt_id, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
+
+	/* enable IEOB interrupts only for GPI event rings or MSI */
+	if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
+		(props->intr != GSI_INTR_MSI))
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
+	else
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_alloc_evt_ring);
+
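
A hedged, kernel-style usage sketch of the allocation path above, filling only the gsi_evt_ring_props fields this file actually validates or programs (a real caller such as the IPA driver sets more; client_evt_err_cb is hypothetical):

	static int client_setup_evt_ring(unsigned long dev_hdl,
			dma_addr_t ring_pa, void *ring_va,
			unsigned long *evt_hdl)
	{
		struct gsi_evt_ring_props props;

		memset(&props, 0, sizeof(props));
		props.intf = GSI_EVT_CHTYPE_GPI_EV;	/* GPI requires a VA */
		props.intr = GSI_INTR_IRQ;
		props.re_size = GSI_EVT_RING_RE_SIZE_16B;
		props.ring_len = 256 * 16;	/* multiple of RE size */
		props.ring_base_addr = ring_pa;	/* must be size-aligned */
		props.ring_base_vaddr = ring_va;
		props.err_cb = client_evt_err_cb;	/* hypothetical */

		return gsi_alloc_evt_ring(&props, dev_hdl, evt_hdl);
	}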
+static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val)
+{
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word2, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+}
+
+int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->evtr[evt_ring_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch = val;
+	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_evt_ring_scratch);
+
+int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
+{
+	uint32_t val;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
+	struct gsi_evt_ctx *ctx;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev ||
+			evt_ring_hdl >= GSI_EVT_RING_MAX) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (atomic_read(&ctx->chan_ref_cnt)) {
+		GSIERR("%d channels still using this event ring\n",
+			atomic_read(&ctx->chan_ref_cnt));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&ctx->compl);
+	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
+				ctx->state);
+		/*
+		 * The hardware did not move the ring to the NOT_ALLOCATED
+		 * state after deallocation, which is an unexpected hardware
+		 * state.
+		 */
+		GSI_ASSERT();
+	}
+	mutex_unlock(&gsi_ctx->mlock);
+
+	if (!ctx->props.evchid_valid) {
+		mutex_lock(&gsi_ctx->mlock);
+		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+	}
+	atomic_dec(&gsi_ctx->num_evt_ring);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_dealloc_evt_ring);
+
+int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
+		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
+				db_addr_wp_lsb);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
+	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);
+
+int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx->ring.wp_local = value;
+	gsi_ring_evt_doorbell(ctx);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_ring_evt_ring_db);
+
+int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
+{
+	struct gsi_chan_ctx *ctx;
+	uint32_t val;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx->ring.wp_local = value;
+
+	/* write MSB first */
+	val = ((ctx->ring.wp_local >> 32) &
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
+			gsi_ctx->per.ee));
+
+	gsi_ring_chan_doorbell(ctx);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_ring_ch_ring_db);
+
+int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
+{
+	uint32_t val;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
+	struct gsi_evt_ctx *ctx;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&ctx->compl);
+	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
+				ctx->state);
+		/*
+		 * The hardware did not return the ring to the ALLOCATED
+		 * state after reset, which indicates hardware instability.
+		 */
+		GSI_ASSERT();
+	}
+
+	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
+	gsi_init_evt_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);
+
+	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
+		gsi_prime_evt_ring(ctx);
+	if (ctx->props.intf == GSI_EVT_CHTYPE_WDI2_EV)
+		gsi_prime_evt_ring_wdi(ctx);
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_reset_evt_ring);
+
+int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !scr) {
+		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	*props = ctx->props;
+	*scr = ctx->scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_get_evt_ring_cfg);
+
+int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || gsi_validate_evt_ring_props(props)) {
+		GSIERR("bad params props=%pK\n", props);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->props.exclusive != props->exclusive) {
+		GSIERR("changing immutable fields not supported\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->props = *props;
+	if (scr)
+		ctx->scratch = *scr;
+	mutex_unlock(&ctx->mlock);
+
+	return gsi_reset_evt_ring(evt_ring_hdl);
+}
+EXPORT_SYMBOL(gsi_set_evt_ring_cfg);
+
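+/*
+ * Program the per-channel QOS register. The register layout changed
+ * across GSI versions, so there is one helper per layout: the pre-v2.5
+ * layout below (which from v2.0 also carries the escape-buffer-only
+ * bit), a v2.5 layout adding prefetch mode and an empty level
+ * threshold, and a v2.9 layout that additionally carries the
+ * doorbell-in-bytes mode. gsi_program_chan_ctx() picks the right one
+ * based on the core version.
+ */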
+static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
+	unsigned int ee)
+{
+	uint32_t val;
+
+	val =
+	(((props->low_weight <<
+		GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
+		GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
+	((props->max_prefetch <<
+		 GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
+		 GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
+	((props->use_db_eng <<
+		GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
+		GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
+	if (gsi_ctx->per.ver >= GSI_VER_2_0)
+		val |= ((props->prefetch_mode <<
+			GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT)
+			& GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK);
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
+}
+
+static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props,
+	unsigned int ee)
+{
+	uint32_t val;
+
+	val =
+	(((props->low_weight <<
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
+	((props->max_prefetch <<
+		 GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
+		 GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
+	((props->use_db_eng <<
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
+	((props->prefetch_mode <<
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
+	((props->empty_lvl_threshold <<
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
+		GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK));
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
+}
+
+static void gsi_program_chan_ctx_qos_v2_9(struct gsi_chan_props *props,
+	unsigned int ee)
+{
+	uint32_t val;
+
+	val =
+	(((props->low_weight <<
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
+	((props->max_prefetch <<
+		 GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
+		 GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
+	((props->use_db_eng <<
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) |
+	((props->prefetch_mode <<
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) &
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) |
+	((props->empty_lvl_threshold <<
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) &
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK) |
+	((props->db_in_bytes <<
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_SHFT) &
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_BMSK));
+
+	gsi_writel(val, gsi_ctx->base +
+		GSI_V2_9_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
+}
+
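+/*
+ * Program the channel context registers (CNTXT_0..CNTXT_3): protocol,
+ * direction, event ring index and element size go into CNTXT_0, the
+ * ring length into CNTXT_1, and the 64-bit ring base address is split
+ * across CNTXT_2 (LSBs) and CNTXT_3 (MSBs). From GSI v2.5 the protocol
+ * field grew an MSB to cover the AQC and 11AD protocols.
+ */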
+static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
+		uint8_t erindex)
+{
+	uint32_t val;
+	uint32_t prot;
+	uint32_t prot_msb;
+
+	switch (props->prot) {
+	case GSI_CHAN_PROT_MHI:
+	case GSI_CHAN_PROT_XHCI:
+	case GSI_CHAN_PROT_GPI:
+	case GSI_CHAN_PROT_XDCI:
+	case GSI_CHAN_PROT_WDI2:
+	case GSI_CHAN_PROT_WDI3:
+	case GSI_CHAN_PROT_GCI:
+	case GSI_CHAN_PROT_MHIP:
+		prot_msb = 0;
+		break;
+	case GSI_CHAN_PROT_AQC:
+	case GSI_CHAN_PROT_11AD:
+		prot_msb = 1;
+		break;
+	default:
+		GSIERR("Unsupported protocol %d\n", props->prot);
+		WARN_ON(1);
+		return;
+	}
+	prot = props->prot;
+
+	val = ((prot <<
+		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) &
+		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK);
+	if (gsi_ctx->per.ver >= GSI_VER_2_5) {
+		val |= ((prot_msb <<
+		GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) &
+		GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK);
+	}
+
+	val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
+			 GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
+		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
+			 GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
+		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
+			 & GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));
+
+	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
+		val = (props->ring_len &
+				GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
+			<< GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
+		gsi_writel(val, gsi_ctx->base +
+				GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_OFFS(
+				props->ch_id, ee));
+	} else {
+		val = (props->ring_len &
+			GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK)
+			<< GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
+		gsi_writel(val, gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id,
+				ee));
+	}
+
+	val = (props->ring_base_addr &
+			GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
+		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));
+
+	val = ((props->ring_base_addr >> 32) &
+		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
+		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));
+
+	if (gsi_ctx->per.ver >= GSI_VER_2_9)
+		gsi_program_chan_ctx_qos_v2_9(props, ee);
+	else if (gsi_ctx->per.ver >= GSI_VER_2_5)
+		gsi_program_chan_ctx_qos_v2_5(props, ee);
+	else
+		gsi_program_chan_ctx_qos(props, ee);
+}
+
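+/*
+ * Initialize the local ring bookkeeping from the channel properties.
+ * max_num_elem is one less than the ring capacity, leaving one element
+ * unused so that a completely full ring can be distinguished from an
+ * empty one when comparing read and write pointers.
+ */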
+static void gsi_init_chan_ring(struct gsi_chan_props *props,
+		struct gsi_ring_ctx *ctx)
+{
+	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
+	ctx->base = props->ring_base_addr;
+	ctx->wp = ctx->base;
+	ctx->rp = ctx->base;
+	ctx->wp_local = ctx->base;
+	ctx->rp_local = ctx->base;
+	ctx->len = props->ring_len;
+	ctx->elem_sz = props->re_size;
+	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
+	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
+		ctx->elem_sz;
+}
+
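+/*
+ * Sanity-check caller supplied channel properties: the channel ID must
+ * be in range, the ring length must be a whole multiple of the element
+ * size, the ring base must be aligned to the ring size rounded up to a
+ * power of two, and the ring must not cross a 4 GB boundary since the
+ * upper 32 address bits are programmed only once. GPI channels must
+ * also supply a ring base VA and a transfer-complete callback, and an
+ * error callback is mandatory for every channel.
+ */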
+static int gsi_validate_channel_props(struct gsi_chan_props *props)
+{
+	uint64_t ra;
+	uint64_t last;
+
+	if (props->ch_id >= gsi_ctx->max_ch) {
+		GSIERR("ch_id %u invalid\n", props->ch_id);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
+				props->ring_len % 4) ||
+			(props->re_size == GSI_CHAN_RE_SIZE_8B &&
+				 props->ring_len % 8) ||
+			(props->re_size == GSI_CHAN_RE_SIZE_16B &&
+				 props->ring_len % 16) ||
+			(props->re_size == GSI_CHAN_RE_SIZE_32B &&
+				 props->ring_len % 32)) {
+		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
+				props->ring_len, props->re_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ra = props->ring_base_addr;
+	do_div(ra, roundup_pow_of_two(props->ring_len));
+
+	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
+		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
+				props->ring_base_addr,
+				roundup_pow_of_two(props->ring_len));
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	last = props->ring_base_addr + props->ring_len - props->re_size;
+
+	/* MSB should stay same within the ring */
+	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
+	    (last & 0xFFFFFFFF00000000ULL)) {
+		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
+			props->ring_base_addr,
+			props->ring_len);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->prot == GSI_CHAN_PROT_GPI &&
+			!props->ring_base_vaddr) {
+		GSIERR("protocol %u requires ring base VA\n", props->prot);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
+		GSIERR("invalid channel low weight %u\n", props->low_weight);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
+		GSIERR("xfer callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->err_cb) {
+		GSIERR("err callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	return GSI_STATUS_SUCCESS;
+}
+
+int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
+		unsigned long *chan_hdl)
+{
+	struct gsi_chan_ctx *ctx;
+	uint32_t val;
+	int res;
+	int ee;
+	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
+	uint8_t erindex;
+	struct gsi_user_data *user_data;
+	size_t user_data_size;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params props=%pK dev_hdl=0x%lx chan_hdl=%pK\n",
+				props, dev_hdl, chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_validate_channel_props(props)) {
+		GSIERR("bad params\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->evt_ring_hdl != ~0) {
+		if (props->evt_ring_hdl >= gsi_ctx->max_ev) {
+			GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
+			return -GSI_STATUS_INVALID_PARAMS;
+		}
+
+		if (atomic_read(
+			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
+			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
+			gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
+			GSI_CHAN_PROT_GCI) {
+			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
+				props->evt_ring_hdl, chan_hdl);
+			return -GSI_STATUS_UNSUPPORTED_OP;
+		}
+	}
+
+	ctx = &gsi_ctx->chan[props->ch_id];
+	if (ctx->allocated) {
+		GSIERR("chan %d already allocated\n", props->ch_id);
+		return -GSI_STATUS_NODEV;
+	}
+	memset(ctx, 0, sizeof(*ctx));
+
+	/* user_data pointer is not required for IPA offloaded WDI channels */
+	if (props->prot != GSI_CHAN_PROT_WDI2 &&
+		props->prot != GSI_CHAN_PROT_WDI3)
+		user_data_size = props->ring_len / props->re_size;
+	else
+		user_data_size = props->re_size;
+	/*
+	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
+	 * user_data needs to be large enough to accommodate those.
+	 * TODO: increase user data size if GSI_VEID_MAX is not enough
+	 */
+	if (props->prot == GSI_CHAN_PROT_GCI)
+		user_data_size += GSI_VEID_MAX;
+
+	user_data = devm_kzalloc(gsi_ctx->dev,
+		user_data_size * sizeof(*user_data),
+		GFP_KERNEL);
+	if (user_data == NULL) {
+		GSIERR("user_data allocation failed\n");
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	mutex_init(&ctx->mlock);
+	init_completion(&ctx->compl);
+	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
+	ctx->props = *props;
+
+	if (gsi_ctx->per.ver != GSI_VER_2_2) {
+		mutex_lock(&gsi_ctx->mlock);
+		ee = gsi_ctx->per.ee;
+		gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
+		val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+					GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+				((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+				 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+		gsi_writel(val, gsi_ctx->base +
+				GSI_EE_n_GSI_CH_CMD_OFFS(ee));
+		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+		if (res == 0) {
+			GSIERR("chan_hdl=%u timed out\n", props->ch_id);
+			mutex_unlock(&gsi_ctx->mlock);
+			devm_kfree(gsi_ctx->dev, user_data);
+			return -GSI_STATUS_TIMED_OUT;
+		}
+		if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+			GSIERR("chan_hdl=%u allocation failed state=%d\n",
+					props->ch_id, ctx->state);
+			mutex_unlock(&gsi_ctx->mlock);
+			devm_kfree(gsi_ctx->dev, user_data);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+		mutex_unlock(&gsi_ctx->mlock);
+	} else {
+		mutex_lock(&gsi_ctx->mlock);
+		ctx->state = GSI_CHAN_STATE_ALLOCATED;
+		mutex_unlock(&gsi_ctx->mlock);
+	}
+	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
+		GSI_NO_EVT_ERINDEX;
+	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
+		GSIERR("invalid erindex %u\n", erindex);
+		devm_kfree(gsi_ctx->dev, user_data);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (erindex < GSI_EVT_RING_MAX) {
+		ctx->evtr = &gsi_ctx->evtr[erindex];
+		if (props->prot != GSI_CHAN_PROT_GCI)
+			atomic_inc(&ctx->evtr->chan_ref_cnt);
+		if (props->prot != GSI_CHAN_PROT_GCI &&
+			ctx->evtr->props.exclusive &&
+			atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
+			ctx->evtr->chan = ctx;
+	}
+
+	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
+
+	spin_lock_init(&ctx->ring.slock);
+	gsi_init_chan_ring(props, &ctx->ring);
+	if (!props->max_re_expected)
+		ctx->props.max_re_expected = ctx->ring.max_num_elem;
+	ctx->user_data = user_data;
+	*chan_hdl = props->ch_id;
+	ctx->allocated = true;
+	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
+	atomic_inc(&gsi_ctx->num_chan);
+
+	if (props->prot == GSI_CHAN_PROT_GCI) {
+		gsi_ctx->coal_info.ch_id = props->ch_id;
+		gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
+	}
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_alloc_channel);
+
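+/*
+ * Allocate a channel owned by the AP for the given channel index: issue
+ * the ALLOCATE command and wait for the hardware to confirm the
+ * ALLOCATED state. Unlike gsi_alloc_channel(), no properties are
+ * validated and no ring or event ring is programmed here.
+ */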
+static int gsi_alloc_ap_channel(unsigned int chan_hdl)
+{
+	struct gsi_chan_ctx *ctx;
+	uint32_t val;
+	int res;
+	int ee;
+	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->allocated) {
+		GSIERR("chan %u already allocated\n", chan_hdl);
+		return -GSI_STATUS_NODEV;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	mutex_init(&ctx->mlock);
+	init_completion(&ctx->compl);
+	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
+
+	mutex_lock(&gsi_ctx->mlock);
+	ee = gsi_ctx->per.ee;
+	gsi_ctx->ch_dbg[chan_hdl].ch_allocate++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+				GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+			((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%u timed out\n", chan_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("chan_hdl=%u allocation failed state=%d\n",
+				chan_hdl, ctx->state);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+
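+/*
+ * Write all four channel scratch words to the SCRATCH_0..SCRATCH_3
+ * registers. Callers serialize access and keep the cached copy in the
+ * channel context in sync.
+ */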
+static void __gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word2, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word3, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	gsi_writel(val.data.word4, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+}
+
+static void __gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi3_channel_scratch2_reg val)
+{
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+}
+
+int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi_channel_scratch3_reg val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+
+	ctx->scratch.wdi.endp_metadatareg_offset =
+				val.wdi.endp_metadatareg_offset;
+	ctx->scratch.wdi.qmap_id = val.wdi.qmap_id;
+
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	mutex_unlock(&ctx->mlock);
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
+
+int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi2_channel_scratch2_reg val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+
+	ctx->scratch.wdi2_new.endp_metadatareg_offset =
+				val.wdi.endp_metadatareg_offset;
+	ctx->scratch.wdi2_new.qmap_id = val.wdi.qmap_id;
+	val.wdi.update_ri_moderation_threshold =
+		ctx->scratch.wdi2_new.update_ri_moderation_threshold;
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	mutex_unlock(&ctx->mlock);
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_channel_scratch2_reg);
+
+static void __gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *val)
+{
+	val->data.word1 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	val->data.word2 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	val->data.word3 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	val->data.word4 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+}
+
+static void __gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi3_channel_scratch2_reg *val)
+{
+	val->data.word1 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+}
+
+static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
+	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
+{
+	union __packed gsi_channel_scratch scr;
+
+	/*
+	 * The read-modify-write sequence below is not atomic. The
+	 * assumption is that the sequencer-specific fields remain
+	 * unchanged across it.
+	 */
+
+	/* READ */
+	scr.data.word1 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	scr.data.word2 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	scr.data.word3 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	scr.data.word4 = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	/* UPDATE */
+	scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr;
+	scr.mhi.assert_bit40 = mscr.assert_bit40;
+	scr.mhi.polling_configuration = mscr.polling_configuration;
+	scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled;
+	scr.mhi.polling_mode = mscr.polling_mode;
+	scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold;
+
+	if (gsi_ctx->per.ver < GSI_VER_2_5) {
+		scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre;
+		scr.mhi.outstanding_threshold = mscr.outstanding_threshold;
+	}
+
+	/* WRITE */
+	gsi_writel(scr.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	gsi_writel(scr.data.word2, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	gsi_writel(scr.data.word3, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	gsi_writel(scr.data.word4, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+
+	return scr;
+}
+
+int gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch = val;
+	__gsi_write_channel_scratch(chan_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_channel_scratch);
+
+int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi3_channel_scratch2_reg val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch.data.word3 = val.data.word1;
+	__gsi_write_wdi3_channel_scratch2_reg(chan_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_wdi3_channel_scratch2_reg);
+
+int gsi_read_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch *val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	__gsi_read_channel_scratch(chan_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_read_channel_scratch);
+
+int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
+		union __packed gsi_wdi3_channel_scratch2_reg *val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	__gsi_read_wdi3_channel_scratch2_reg(chan_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_read_wdi3_channel_scratch2_reg);
+
+int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
+		struct __packed gsi_mhi_channel_scratch mscr)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_update_mhi_channel_scratch);
+
+int gsi_query_channel_db_addr(unsigned long chan_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
+		GSIERR("bad params msb=%pK lsb=%pK\n", db_addr_wp_msb,
+				db_addr_wp_lsb);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
+	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_channel_db_addr);
+
+int gsi_start_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_START;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
+		ctx->state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&ctx->compl);
+
+	/* check if INTSET is in IRQ mode for GPI channel */
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
+	if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
+		val != GSI_INTR_IRQ) {
+		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
+		BUG();
+	}
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+
+	GSIDBG("GSI Channel Start, waiting for completion\n");
+	gsi_channel_state_change_wait(chan_hdl,
+		ctx,
+		GSI_START_CMD_TIMEOUT_MS, op);
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+		/*
+		 * The channel did not reach the STARTED state, which is
+		 * an unexpected hardware state.
+		 */
+		GSIERR("chan=%lu timed out, unexpected state=%u\n",
+			chan_hdl, ctx->state);
+		GSI_ASSERT();
+	}
+
+	GSIDBG("GSI Channel=%lu Start success\n", chan_hdl);
+
+	/* write order MUST be MSB followed by LSB */
+	val = ((ctx->ring.wp_local >> 32) &
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
+		gsi_ctx->per.ee));
+
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_start_channel);
+
+int gsi_stop_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
+		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
+		return GSI_STATUS_SUCCESS;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
+		ctx->state != GSI_CHAN_STATE_ERROR) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&ctx->compl);
+
+	/* check if INTSET is in IRQ mode for GPI channel */
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
+	if (ctx->evtr->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
+		val != GSI_INTR_IRQ) {
+		GSIERR("GSI_EE_n_CNTXT_INTSET_OFFS %d\n", val);
+		BUG();
+	}
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+
+	GSIDBG("GSI Channel Stop, waiting for completion\n");
+	gsi_channel_state_change_wait(chan_hdl,
+		ctx,
+		GSI_STOP_CMD_TIMEOUT_MS, op);
+
+	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
+		res = -GSI_STATUS_BAD_STATE;
+		BUG();
+		goto free_lock;
+	}
+
+	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu busy try again\n", chan_hdl);
+		res = -GSI_STATUS_AGAIN;
+		goto free_lock;
+	}
+
+	res = GSI_STATUS_SUCCESS;
+
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+	return res;
+}
+EXPORT_SYMBOL(gsi_stop_channel);
+
+int gsi_stop_db_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
+		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
+		return GSI_STATUS_SUCCESS;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&ctx->compl);
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl,
+			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
+		res = -GSI_STATUS_BAD_STATE;
+		goto free_lock;
+	}
+
+	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu busy try again\n", chan_hdl);
+		res = -GSI_STATUS_AGAIN;
+		goto free_lock;
+	}
+
+	res = GSI_STATUS_SUCCESS;
+
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+	return res;
+}
+EXPORT_SYMBOL(gsi_stop_db_channel);
+
+int gsi_reset_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+	bool reset_done = false;
+	uint32_t retry_cnt = 0;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	/*
+	 * In the WDI3 case, if SAP is enabled but no client is connected,
+	 * the channel stays in the ALLOCATED state. When SAP is disabled,
+	 * gsi_reset_channel() is called and the reset must proceed.
+	 */
+	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
+		ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+
+reset:
+	reinit_completion(&ctx->compl);
+	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+reverify_chnlstate:
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
+				ctx->state);
+		/*
+		 * The GSI register state and the GSI channel context
+		 * state are out of sync; wait briefly for them to
+		 * synchronize and re-check.
+		 */
+		retry_cnt++;
+		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
+			usleep_range(GSI_RESET_WA_MIN_SLEEP,
+				GSI_RESET_WA_MAX_SLEEP);
+			goto reverify_chnlstate;
+		}
+		/*
+		 * The hardware returned an incorrect state, which is an
+		 * unexpected hardware state.
+		 */
+		GSI_ASSERT();
+	}
+
+	/* the hardware issue is fixed from GSI 2.0 onward; no WA is needed */
+	if (gsi_ctx->per.ver >= GSI_VER_2_0)
+		reset_done = true;
+
+	/* workaround: reset GSI producers again */
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
+		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
+		reset_done = true;
+		goto reset;
+	}
+
+	if (ctx->props.cleanup_cb)
+		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);
+
+	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
+			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
+	gsi_init_chan_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
+
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_reset_channel);
+
+int gsi_dealloc_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* channel deallocation is not supported in GSI_VER_2_2 */
+	if (gsi_ctx->per.ver != GSI_VER_2_2) {
+		mutex_lock(&gsi_ctx->mlock);
+		reinit_completion(&ctx->compl);
+
+		gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
+		val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+					GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+				((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+				 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+		gsi_writel(val, gsi_ctx->base +
+				GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+		res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+		if (res == 0) {
+			GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+			mutex_unlock(&gsi_ctx->mlock);
+			return -GSI_STATUS_TIMED_OUT;
+		}
+		if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
+			GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
+					ctx->state);
+			/* Hardware returned incorrect value */
+			GSI_ASSERT();
+		}
+
+		mutex_unlock(&gsi_ctx->mlock);
+	} else {
+		mutex_lock(&gsi_ctx->mlock);
+		GSIDBG("channel deallocation not supported in GSI_VER_2_2\n");
+		ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
+		GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
+								ctx->state);
+		mutex_unlock(&gsi_ctx->mlock);
+	}
+	devm_kfree(gsi_ctx->dev, ctx->user_data);
+	ctx->allocated = false;
+	if (ctx->evtr && (ctx->props.prot != GSI_CHAN_PROT_GCI))
+		atomic_dec(&ctx->evtr->chan_ref_cnt);
+	atomic_dec(&gsi_ctx->num_chan);
+
+	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+		gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
+		gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
+	}
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_dealloc_channel);
+
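+/*
+ * Update the channel datapath statistics from the current ring
+ * occupancy: track the longest observed empty period and bucket the
+ * occupancy into below one-third, below two-thirds, and above
+ * two-thirds of the expected maximum.
+ */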
+void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
+{
+	unsigned long now = jiffies_to_msecs(jiffies);
+	unsigned long elapsed;
+
+	if (used == 0) {
+		elapsed = now - ctx->stats.dp.last_timestamp;
+		if (ctx->stats.dp.empty_time < elapsed)
+			ctx->stats.dp.empty_time = elapsed;
+	}
+
+	if (used <= ctx->props.max_re_expected / 3)
+		++ctx->stats.dp.ch_below_lo;
+	else if (used <= 2 * ctx->props.max_re_expected / 3)
+		++ctx->stats.dp.ch_below_hi;
+	else
+		++ctx->stats.dp.ch_above_hi;
+	ctx->stats.dp.last_timestamp = now;
+}
+
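+/*
+ * Compute how many ring elements are free on a GPI channel. The used
+ * count is the distance from the read pointer (the hardware RP when no
+ * event ring is attached, the local shadow otherwise) to the local
+ * write pointer, modulo the ring size; the free count is the remainder
+ * of the usable capacity.
+ */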
+static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
+		uint16_t *num_free_re)
+{
+	uint16_t start;
+	uint16_t end;
+	uint64_t rp;
+	int ee = gsi_ctx->per.ee;
+	uint16_t used;
+
+	WARN_ON(ctx->props.prot != GSI_CHAN_PROT_GPI);
+
+	if (!ctx->evtr) {
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
+		ctx->ring.rp = rp;
+	} else {
+		rp = ctx->ring.rp_local;
+	}
+
+	start = gsi_find_idx_from_addr(&ctx->ring, rp);
+	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
+
+	if (end >= start)
+		used = end - start;
+	else
+		used = ctx->ring.max_num_elem + 1 - (start - end);
+
+	*num_free_re = ctx->ring.max_num_elem - used;
+}
+
+int gsi_query_channel_info(unsigned long chan_hdl,
+		struct gsi_chan_info *info)
+{
+	struct gsi_chan_ctx *ctx;
+	spinlock_t *slock;
+	unsigned long flags;
+	uint64_t rp;
+	uint64_t wp;
+	int ee;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch || !info) {
+		GSIERR("bad params chan_hdl=%lu info=%pK\n", chan_hdl, info);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->evtr) {
+		slock = &ctx->evtr->ring.slock;
+		info->evt_valid = true;
+	} else {
+		slock = &ctx->ring.slock;
+		info->evt_valid = false;
+	}
+
+	spin_lock_irqsave(slock, flags);
+
+	ee = gsi_ctx->per.ee;
+	rp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.rp = rp;
+	info->rp = rp;
+
+	wp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.wp = wp;
+	info->wp = wp;
+
+	if (info->evt_valid) {
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
+			<< 32;
+		info->evt_rp = rp;
+
+		wp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
+		wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
+			<< 32;
+		info->evt_wp = wp;
+	}
+
+	spin_unlock_irqrestore(slock, flags);
+
+	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
+			chan_hdl, info->rp, info->wp,
+			info->evt_valid, info->evt_rp, info->evt_wp);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_channel_info);
+
+int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
+{
+	struct gsi_chan_ctx *ctx;
+	spinlock_t *slock;
+	unsigned long flags;
+	uint64_t rp;
+	uint64_t wp;
+	uint64_t rp_local;
+	int ee;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
+		GSIERR("bad params chan_hdl=%lu is_empty=%pK\n",
+				chan_hdl, is_empty);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	ee = gsi_ctx->per.ee;
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->evtr)
+		slock = &ctx->evtr->ring.slock;
+	else
+		slock = &ctx->ring.slock;
+
+	spin_lock_irqsave(slock, flags);
+
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr) {
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000;
+		ctx->evtr->ring.rp = rp;
+
+		wp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
+		wp |= ctx->evtr->ring.wp & 0xFFFFFFFF00000000;
+		ctx->evtr->ring.wp = wp;
+
+		rp_local = ctx->evtr->ring.rp_local;
+	} else {
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+		ctx->ring.rp = rp;
+
+		wp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+		wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
+		ctx->ring.wp = wp;
+
+		rp_local = ctx->ring.rp_local;
+	}
+
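+	/*
+	 * A producer (FROM_GSI) channel is empty once the local read
+	 * pointer has caught up with the hardware read pointer; a
+	 * consumer channel is empty once the hardware has consumed
+	 * everything queued, i.e. RP == WP.
+	 */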
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		*is_empty = (rp_local == rp);
+	else
+		*is_empty = (wp == rp);
+
+	spin_unlock_irqrestore(slock, flags);
+
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && ctx->evtr)
+		GSIDBG("ch=%lu ev=%d RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
+			chan_hdl, ctx->evtr->id, rp, wp, rp_local);
+	else
+		GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
+			chan_hdl, rp, wp, rp_local);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_is_channel_empty);
+
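+/*
+ * Reserve a user_data slot for a GCI transfer and return its index,
+ * which is embedded in the TRE as a cookie. The slot matching the TRE
+ * index is preferred; if it is still occupied by an uncompleted
+ * transfer, a slot from the GSI_VEID_MAX-sized escape buffer (or, as a
+ * last resort, any free slot) is used instead.
+ */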
+int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
+{
+	int i;
+	int end;
+
+	if (!ctx->user_data[idx].valid) {
+		ctx->user_data[idx].valid = true;
+		return idx;
+	}
+
+	/*
+	 * at this point we need to find an "escape buffer" for the cookie
+	 * as the userdata in this spot is in use. This happens if the TRE at
+	 * idx is not completed yet and it is getting reused by a new TRE.
+	 */
+	ctx->stats.userdata_in_use++;
+	end = ctx->ring.max_num_elem + 1;
+	for (i = 0; i < GSI_VEID_MAX; i++) {
+		if (!ctx->user_data[end + i].valid) {
+			ctx->user_data[end + i].valid = true;
+			return end + i;
+		}
+	}
+
+	/* Go over original userdata when escape buffer is full (costly) */
+	GSIDBG("escape buffer is full\n");
+	for (i = 0; i < end; i++) {
+		if (!ctx->user_data[i].valid) {
+			ctx->user_data[i].valid = true;
+			return i;
+		}
+	}
+
+	/* Everything is full (possibly a stall) */
+	GSIERR("both userdata array and escape buffer are full\n");
+	BUG();
+	return 0xFFFF;
+}
+
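+/*
+ * Build a coalescing (GCI) TRE at the local write pointer. GCI TREs
+ * carry a cookie instead of relying on the ring index, since their
+ * completions may arrive out of order; the buffer address is limited
+ * to 40 bits by the TRE layout.
+ */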
+int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
+	struct gsi_xfer_elem *xfer)
+{
+	struct gsi_gci_tre gci_tre;
+	struct gsi_gci_tre *tre_gci_ptr;
+	uint16_t idx;
+
+	memset(&gci_tre, 0, sizeof(gci_tre));
+	if (xfer->addr & 0xFFFFFF0000000000) {
+		GSIERR("chan_hdl=%u addr too large=%llx\n",
+			ctx->props.ch_id, xfer->addr);
+		return -EINVAL;
+	}
+
+	if (xfer->type != GSI_XFER_ELEM_DATA) {
+		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
+			xfer->type);
+		return -EINVAL;
+	}
+
+	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
+	tre_gci_ptr = (struct gsi_gci_tre *)(ctx->ring.base_va +
+		idx * ctx->ring.elem_sz);
+
+	gci_tre.buffer_ptr = xfer->addr;
+	gci_tre.buf_len = xfer->len;
+	gci_tre.re_type = GSI_RE_COAL;
+	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
+	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
+		return -EPERM;
+
+	/* write the TRE to ring */
+	*tre_gci_ptr = gci_tre;
+	ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;
+
+	return 0;
+}
+
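+/*
+ * Build a regular TRE at the local write pointer: map the transfer
+ * type to the RE type, latch the BEI/EOT/EOB/CHAIN flags, and stash
+ * the caller's cookie in the user_data slot matching the ring index.
+ */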
+int __gsi_populate_tre(struct gsi_chan_ctx *ctx,
+	struct gsi_xfer_elem *xfer)
+{
+	struct gsi_tre tre;
+	struct gsi_tre *tre_ptr;
+	uint16_t idx;
+
+	memset(&tre, 0, sizeof(tre));
+	tre.buffer_ptr = xfer->addr;
+	tre.buf_len = xfer->len;
+	if (xfer->type == GSI_XFER_ELEM_DATA) {
+		tre.re_type = GSI_RE_XFER;
+	} else if (xfer->type == GSI_XFER_ELEM_IMME_CMD) {
+		tre.re_type = GSI_RE_IMMD_CMD;
+	} else if (xfer->type == GSI_XFER_ELEM_NOP) {
+		tre.re_type = GSI_RE_NOP;
+	} else {
+		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
+			xfer->type);
+		return -EINVAL;
+	}
+
+	tre.bei = (xfer->flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
+	tre.ieot = (xfer->flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
+	tre.ieob = (xfer->flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
+	tre.chain = (xfer->flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;
+
+	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
+	tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
+		idx * ctx->ring.elem_sz);
+
+	/* write the TRE to ring */
+	*tre_ptr = tre;
+	ctx->user_data[idx].valid = true;
+	ctx->user_data[idx].p = xfer->xfer_user_data;
+
+	return 0;
+}
+
+int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+		struct gsi_xfer_elem *xfer, bool ring_db)
+{
+	struct gsi_chan_ctx *ctx;
+	uint16_t free;
+	uint64_t wp_rollback;
+	int i;
+	spinlock_t *slock;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
+		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
+				chan_hdl, num_xfers, xfer);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (unlikely(gsi_ctx->chan[chan_hdl].state
+				 == GSI_CHAN_STATE_NOT_ALLOCATED)) {
+		GSIERR("bad state %d\n",
+			   gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+			ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->evtr)
+		slock = &ctx->evtr->ring.slock;
+	else
+		slock = &ctx->ring.slock;
+
+	spin_lock_irqsave(slock, flags);
+
+	/* nothing to queue; only ring the doorbell if requested */
+	if (!num_xfers)
+		goto ring_doorbell;
+
+	/*
+	 * for GCI channels the responsibility is on the caller to make sure
+	 * there is enough room in the TRE ring.
+	 */
+	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		__gsi_query_channel_free_re(ctx, &free);
+		if (num_xfers > free) {
+			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
+				chan_hdl, num_xfers, free);
+			spin_unlock_irqrestore(slock, flags);
+			return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
+		}
+	}
+
+	wp_rollback = ctx->ring.wp_local;
+	for (i = 0; i < num_xfers; i++) {
+		if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+			if (__gsi_populate_gci_tre(ctx, &xfer[i]))
+				break;
+		} else {
+			if (__gsi_populate_tre(ctx, &xfer[i]))
+				break;
+		}
+		gsi_incr_ring_wp(&ctx->ring);
+	}
+
+	if (i != num_xfers) {
+		/* reject all the xfers */
+		ctx->ring.wp_local = wp_rollback;
+		spin_unlock_irqrestore(slock, flags);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx->stats.queued += num_xfers;
+
+ring_doorbell:
+	if (ring_db) {
+		/* ensure TRE is set before ringing doorbell */
+		wmb();
+		gsi_ring_chan_doorbell(ctx);
+	}
+
+	spin_unlock_irqrestore(slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_queue_xfer);
+
+int gsi_start_xfer(unsigned long chan_hdl)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->ring.wp == ctx->ring.wp_local)
+		return GSI_STATUS_SUCCESS;
+
+	gsi_ring_chan_doorbell(ctx);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_start_xfer);
+
+int gsi_poll_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify)
+{
+	int unused_var;
+
+	return gsi_poll_n_channel(chan_hdl, notify, 1, &unused_var);
+}
+EXPORT_SYMBOL(gsi_poll_channel);
+
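+/*
+ * Poll up to expected_num completed transfers from the event ring tied
+ * to this channel. If the local shadow shows nothing new, the hardware
+ * RP is re-read; when the ring is truly empty the IEOB interrupt is
+ * cleared and the RP is read once more to close the race window with
+ * the hardware before reporting GSI_STATUS_POLL_EMPTY.
+ */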
+int gsi_poll_n_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify,
+		int expected_num, int *actual_num)
+{
+	struct gsi_chan_ctx *ctx;
+	uint64_t rp;
+	int ee;
+	int i;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
+	    !actual_num || expected_num <= 0) {
+		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
+			chan_hdl, notify);
+		GSIERR("actual_num=%pK expected_num=%d\n",
+			actual_num, expected_num);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	ee = gsi_ctx->per.ee;
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!ctx->evtr) {
+		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
+		/* update rp to see if we have anything new to process */
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
+
+		ctx->evtr->ring.rp = rp;
+		/* read gsi event ring rp again if last read is empty */
+		if (rp == ctx->evtr->ring.rp_local) {
+			/* event ring is empty */
+			gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
+				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
+			/* do another read to close a small window */
+			__iowmb();
+			rp = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(
+				ctx->evtr->id, ee));
+			rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
+			ctx->evtr->ring.rp = rp;
+			if (rp == ctx->evtr->ring.rp_local) {
+				spin_unlock_irqrestore(
+					&ctx->evtr->ring.slock,
+					flags);
+				ctx->stats.poll_empty++;
+				return GSI_STATUS_POLL_EMPTY;
+			}
+		}
+	}
+
+	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
+			ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
+
+	if (*actual_num > expected_num)
+		*actual_num = expected_num;
+
+	for (i = 0; i < *actual_num; i++)
+		gsi_process_evt_re(ctx->evtr, notify + i, false);
+
+	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
+	ctx->stats.poll_ok++;
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_poll_n_channel);
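+
+/*
+ * Usage sketch (illustrative only): a NAPI-style consumer drains
+ * completions in batches until the event ring is empty. BUDGET and
+ * consume() are caller-side placeholders; notify field names are as
+ * declared in msm_gsi.h.
+ *
+ *	struct gsi_chan_xfer_notify notify[BUDGET];
+ *	int i, n;
+ *
+ *	while (gsi_poll_n_channel(chan_hdl, notify, BUDGET, &n) ==
+ *			GSI_STATUS_SUCCESS) {
+ *		for (i = 0; i < n; i++)
+ *			consume(notify[i].xfer_user_data);
+ *	}
+ *
+ * The loop ends with GSI_STATUS_POLL_EMPTY once no events remain; the
+ * consumer would then switch back to callback mode (see
+ * gsi_config_channel_mode() below).
+ */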
+
+int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
+{
+	struct gsi_chan_ctx *ctx, *coal_ctx;
+	enum gsi_chan_mode curr;
+	unsigned long flags;
+	enum gsi_chan_mode chan_mode;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
+		GSIERR("cannot configure mode on chan_hdl=%lu\n",
+				chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (atomic_read(&ctx->poll_mode))
+		curr = GSI_CHAN_MODE_POLL;
+	else
+		curr = GSI_CHAN_MODE_CALLBACK;
+
+	if (mode == curr) {
+		GSIDBG("already in requested mode %u chan_hdl=%lu\n",
+				curr, chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	if (curr == GSI_CHAN_MODE_CALLBACK &&
+			mode == GSI_CHAN_MODE_POLL) {
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
+		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
+		atomic_set(&ctx->poll_mode, mode);
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
+			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+			if (coal_ctx != NULL)
+				atomic_set(&coal_ctx->poll_mode, mode);
+		}
+
+		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
+			ctx->evtr->id, mode);
+		ctx->stats.callback_to_poll++;
+	}
+
+	if (curr == GSI_CHAN_MODE_POLL &&
+			mode == GSI_CHAN_MODE_CALLBACK) {
+		atomic_set(&ctx->poll_mode, mode);
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
+			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+			if (coal_ctx != NULL)
+				atomic_set(&coal_ctx->poll_mode, mode);
+		}
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
+		GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
+			ctx->evtr->id, mode);
+
+		/*
+		 * In GSI 2.2 and 2.5 there is a limitation that can lead
+		 * to losing an interrupt. For these versions an
+		 * explicit check is needed after enabling the interrupt
+		 */
+		if ((gsi_ctx->per.ver == GSI_VER_2_2 ||
+		    gsi_ctx->per.ver == GSI_VER_2_5) &&
+			!gsi_ctx->per.skip_ieob_mask_wa) {
+			u32 src = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(
+					gsi_ctx->per.ee));
+			if (src & (1 << ctx->evtr->id)) {
+				__gsi_config_ieob_irq(
+					gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
+				gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
+					GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(
+							gsi_ctx->per.ee));
+				spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+				spin_lock_irqsave(&ctx->evtr->ring.slock,
+									flags);
+				chan_mode = atomic_xchg(&ctx->poll_mode,
+						GSI_CHAN_MODE_POLL);
+				spin_unlock_irqrestore(
+					&ctx->evtr->ring.slock, flags);
+				ctx->stats.poll_pending_irq++;
+				GSIDBG("IEOB WA pnd cnt = %ld prvmode = %d\n",
+						ctx->stats.poll_pending_irq,
+						chan_mode);
+				if (chan_mode == GSI_CHAN_MODE_POLL)
+					return GSI_STATUS_SUCCESS;
+				else
+					return -GSI_STATUS_PENDING_IRQ;
+			}
+		}
+		ctx->stats.poll_to_callback++;
+	}
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_config_channel_mode);
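+
+/*
+ * Usage sketch (illustrative only): because of the GSI 2.2/2.5 IEOB
+ * limitation handled above, a caller switching back to callback mode must
+ * be prepared for the channel to be left in poll mode:
+ *
+ *	ret = gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
+ *	if (ret == -GSI_STATUS_PENDING_IRQ)
+ *		drain_again_and_retry();
+ *
+ * -GSI_STATUS_PENDING_IRQ means an IEOB was already pending and the
+ * channel was put back into poll mode, so the client should poll again
+ * before retrying the switch. drain_again_and_retry() is a caller-side
+ * placeholder.
+ */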
+
+int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !scr) {
+		GSIERR("bad params props=%pK scr=%pK\n", props, scr);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	*props = ctx->props;
+	*scr = ctx->scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_get_channel_cfg);
+
+int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || gsi_validate_channel_props(props)) {
+		GSIERR("bad params props=%pK\n", props);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->props.ch_id != props->ch_id ||
+		ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
+		GSIERR("changing immutable fields not supported\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->props = *props;
+	if (scr)
+		ctx->scratch = *scr;
+	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
+			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
+	gsi_init_chan_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_set_channel_cfg);
+
+static void gsi_configure_ieps(void *base, enum gsi_ver ver)
+{
+	void __iomem *gsi_base = base;
+
+	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
+	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
+	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
+	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
+	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
+	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
+	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS);
+	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
+	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
+	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
+	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+	gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS);
+	gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS);
+	gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS);
+
+	if (ver >= GSI_VER_2_5)
+		gsi_writel(17,
+			gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS);
+}
+
+static void gsi_configure_bck_prs_matrix(void *base)
+{
+	void __iomem *gsi_base = (void __iomem *) base;
+
+	/*
+	 * For now, these are default values. In the future, GSI FW image will
+	 * produce optimized back-pressure values based on the FW image.
+	 */
+	gsi_writel(0xfffffffe,
+		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffefff,
+		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
+	gsi_writel(0x00000000,
+		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
+	gsi_writel(0x00000000,
+		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xff03ffff,
+		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
+}
+
+int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->base) {
+		GSIERR("access to GSI HW has not been mapped\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
+		GSIERR("Incorrect version %d\n", ver);
+		return -GSI_STATUS_ERROR;
+	}
+
+	gsi_writel(0, gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
+	gsi_writel(per_base_addr,
+			gsi_ctx->base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
+	gsi_configure_bck_prs_matrix((void *)gsi_ctx->base);
+	gsi_configure_ieps(gsi_ctx->base, ver);
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_configure_regs);
+
+int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
+{
+	void __iomem *gsi_base;
+	uint32_t value;
+
+	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
+		GSIERR("Incorrect version %d\n", ver);
+		return -GSI_STATUS_ERROR;
+	}
+
+	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
+	if (!gsi_base) {
+		GSIERR("ioremap failed\n");
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	/* Enable the MCS and set to x2 clocks */
+	if (ver >= GSI_VER_1_2) {
+		value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+		gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+			((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+				GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+			((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+				GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+			((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+				GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+	} else {
+		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+				GSI_GSI_CFG_UC_IS_MCS_BMSK));
+	}
+
+	/*
+	 * GSI frequency is the peripheral frequency divided by 3
+	 * (the field value 2 gives a divide ratio of 2 + 1).
+	 */
+	if (ver >= GSI_VER_2_5)
+		value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) &
+			GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK);
+	gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+	iounmap(gsi_base);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(gsi_enable_fw);
+
+void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
+		unsigned long *size, enum gsi_ver ver)
+{
+	unsigned long maxn;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return;
+	}
+
+	switch (ver) {
+	case GSI_VER_1_0:
+	case GSI_VER_1_2:
+	case GSI_VER_1_3:
+		maxn = GSI_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_2_0:
+		maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_2_2:
+		maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_2_5:
+		maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_2_7:
+		maxn = GSI_V2_7_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_2_9:
+		maxn = GSI_V2_9_GSI_INST_RAM_n_MAXn;
+		break;
+	case GSI_VER_ERR:
+	case GSI_VER_MAX:
+	default:
+		GSIERR("GSI version is not supported %d\n", ver);
+		WARN_ON(1);
+		return;
+	}
+	if (size)
+		*size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1);
+
+	if (base_offset) {
+		if (ver < GSI_VER_2_5)
+			*base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
+		else
+			*base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0);
+	}
+}
+EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
+
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
+	uint32_t val;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_idx >= gsi_ctx->max_ch || !code) {
+		GSIERR("bad params chan_idx=%d\n", chan_idx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+	/* invalidate the response */
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+	gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
+	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+		msecs_to_jiffies(GSI_CMD_TIMEOUT));
+	if (res == 0) {
+		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
+		GSI_GEN_EE_CMD_RETURN_CODE_RETRY) {
+		GSIDBG("chan_idx=%u ee=%u busy try again\n", chan_idx, ee);
+		*code = GSI_GEN_EE_CMD_RETURN_CODE_RETRY;
+		res = -GSI_STATUS_AGAIN;
+		goto free_lock;
+	}
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+		GSIERR("No response received\n");
+		res = -GSI_STATUS_ERROR;
+		goto free_lock;
+	}
+
+	res = GSI_STATUS_SUCCESS;
+	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return res;
+}
+EXPORT_SYMBOL(gsi_halt_channel_ee);
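+
+/*
+ * Usage sketch (illustrative only): the HALT_CHANNEL command may complete
+ * with the RETRY return code while the channel is busy, so callers
+ * typically loop with a bounded retry count. MAX_HALT_RETRY is a
+ * caller-chosen bound, not something this file defines.
+ *
+ *	int ret, code, retry = 0;
+ *
+ *	do {
+ *		ret = gsi_halt_channel_ee(chan_idx, ee, &code);
+ *	} while (ret == -GSI_STATUS_AGAIN && ++retry < MAX_HALT_RETRY);
+ */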
+
+int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
+	struct gsi_chan_ctx *ctx;
+	uint32_t val;
+	int res;
+
+	if (chan_idx >= gsi_ctx->max_ch || !code) {
+		GSIERR("bad params chan_idx=%d\n", chan_idx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (ee == 0)
+		return gsi_alloc_ap_channel(chan_idx);
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+	/* invalidate the response */
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+		msecs_to_jiffies(GSI_CMD_TIMEOUT));
+	if (res == 0) {
+		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
+		GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
+		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
+		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
+		res = -GSI_STATUS_RES_ALLOC_FAILURE;
+		goto free_lock;
+	}
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+		GSIERR("No response received\n");
+		res = -GSI_STATUS_ERROR;
+		goto free_lock;
+	}
+	if (ee == 0) {
+		ctx = &gsi_ctx->chan[chan_idx];
+		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;
+	}
+	res = GSI_STATUS_SUCCESS;
+	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return res;
+}
+EXPORT_SYMBOL(gsi_alloc_channel_ee);
+
+int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->base) {
+		GSIERR("access to GSI HW has not been mapped\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	gsi_writel(per_ep_index,
+		gsi_ctx->base +
+		GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(chan_num, ee));
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
+
+void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
+	uint32_t db_addr_low, uint32_t db_addr_high)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return;
+	}
+
+	if (gsi_ctx->per.ver >= GSI_VER_2_9) {
+		gsi_writel(db_addr_low, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+
+		gsi_writel(db_addr_high, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+	} else {
+		gsi_writel(db_addr_low, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+
+		gsi_writel(db_addr_high, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+	}
+}
+EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
+
+void gsi_wdi3_dump_register(unsigned long chan_hdl)
+{
+	uint32_t val;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return;
+	}
+	GSIDBG("reg dump ch id %ld\n", chan_hdl);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_QOS_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_QOS_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS 0x%x\n", val);
+}
+EXPORT_SYMBOL(gsi_wdi3_dump_register);
+
+static int msm_gsi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	pr_debug("gsi_probe\n");
+	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
+	if (!gsi_ctx) {
+		dev_err(dev, "failed to allocated gsi context\n");
+		return -ENOMEM;
+	}
+
+	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
+		"gsi", 0);
+	if (gsi_ctx->ipc_logbuf == NULL)
+		GSIERR("failed to create IPC log, continue...\n");
+
+	gsi_ctx->dev = dev;
+	init_completion(&gsi_ctx->gen_ee_cmd_compl);
+	gsi_debugfs_init();
+
+	return 0;
+}
+
+static struct platform_driver msm_gsi_driver = {
+	.probe          = msm_gsi_probe,
+	.driver		= {
+		.name	= "gsi",
+		.of_match_table = msm_gsi_match,
+	},
+};
+
+static struct platform_device *pdev;
+
+/*
+ * Module Init.
+ */
+static int __init gsi_init(void)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = platform_driver_register(&msm_gsi_driver);
+	if (ret < 0)
+		goto out;
+
+	if (running_emulation) {
+		pdev = platform_device_register_simple("gsi", -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			ret = PTR_ERR(pdev);
+			platform_driver_unregister(&msm_gsi_driver);
+			goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+arch_initcall(gsi_init);
+
+/*
+ * Module exit.
+ */
+static void __exit gsi_exit(void)
+{
+	if (running_emulation && pdev)
+		platform_device_unregister(pdev);
+	platform_driver_unregister(&msm_gsi_driver);
+}
+module_exit(gsi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic Software Interface (GSI)");

+ 351 - 0
gsi/gsi.h

@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef GSI_H
+#define GSI_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/msm_gsi.h>
+#include <linux/errno.h>
+#include <linux/ipc_logging.h>
+
+/*
+ * The following adds stubs (for EMULATION builds) for code not found on x86.
+ */
+#if defined(CONFIG_IPA_EMULATION)
+# include "gsi_emulation_stubs.h"
+#endif
+
+#define GSI_ASSERT() \
+	BUG()
+
+#define GSI_CHAN_MAX      31
+#define GSI_EVT_RING_MAX  24
+#define GSI_NO_EVT_ERINDEX 31
+
+#define gsi_readl(c)	(readl_relaxed(c))
+#define gsi_writel(v, c)	({ __iowmb(); writel_relaxed((v), (c)); })
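+
+/*
+ * gsi_writel() issues an __iowmb() before the relaxed write, so earlier
+ * memory updates (e.g. TRE contents) are observable by the device before
+ * the register write lands, matching the ordering of a non-relaxed
+ * writel().
+ */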
+
+#define GSI_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
+
+#define GSIDBG(fmt, args...) \
+	do { \
+		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
+				"%s:%d " fmt, ## args); \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define GSIDBG_LOW(fmt, args...) \
+	do { \
+		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define GSIERR(fmt, args...) \
+	do { \
+		dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
+				"%s:%d " fmt, ## args); \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
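+/*
+ * Note: all three logging macros dereference gsi_ctx->dev in the
+ * dev_dbg()/dev_err() call before the NULL check on gsi_ctx, so they are
+ * only safe to use once probe has allocated the context. Paths that can
+ * run earlier fall back to pr_err(), as the entry checks in gsi.c do.
+ */
+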
+#define GSI_IPC_LOG_PAGES 50
+
+enum gsi_evt_ring_state {
+	GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+	GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+	GSI_EVT_RING_STATE_ERROR = 0xf
+};
+
+enum gsi_chan_state {
+	GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
+	GSI_CHAN_STATE_ALLOCATED = 0x1,
+	GSI_CHAN_STATE_STARTED = 0x2,
+	GSI_CHAN_STATE_STOPPED = 0x3,
+	GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
+	GSI_CHAN_STATE_ERROR = 0xf
+};
+
+struct gsi_ring_ctx {
+	spinlock_t slock;
+	unsigned long base_va;
+	uint64_t base;
+	uint64_t wp;
+	uint64_t rp;
+	uint64_t wp_local;
+	uint64_t rp_local;
+	uint16_t len;
+	uint8_t elem_sz;
+	uint16_t max_num_elem;
+	uint64_t end;
+};
+
+struct gsi_chan_dp_stats {
+	unsigned long ch_below_lo;
+	unsigned long ch_below_hi;
+	unsigned long ch_above_hi;
+	unsigned long empty_time;
+	unsigned long last_timestamp;
+};
+
+struct gsi_chan_stats {
+	unsigned long queued;
+	unsigned long completed;
+	unsigned long callback_to_poll;
+	unsigned long poll_to_callback;
+	unsigned long poll_pending_irq;
+	unsigned long invalid_tre_error;
+	unsigned long poll_ok;
+	unsigned long poll_empty;
+	unsigned long userdata_in_use;
+	struct gsi_chan_dp_stats dp;
+};
+
+/**
+ * struct gsi_user_data - user_data element pointed to by the TRE
+ * @valid: true means the element is in use; false means it is free to
+ *	be overwritten
+ * @p: pointer to the user data array element
+ */
+struct gsi_user_data {
+	bool valid;
+	void *p;
+};
+
+struct gsi_chan_ctx {
+	struct gsi_chan_props props;
+	enum gsi_chan_state state;
+	struct gsi_ring_ctx ring;
+	struct gsi_user_data *user_data;
+	struct gsi_evt_ctx *evtr;
+	struct mutex mlock;
+	struct completion compl;
+	bool allocated;
+	atomic_t poll_mode;
+	union __packed gsi_channel_scratch scratch;
+	struct gsi_chan_stats stats;
+	bool enable_dp_stats;
+	bool print_dp_stats;
+};
+
+struct gsi_evt_stats {
+	unsigned long completed;
+};
+
+struct gsi_evt_ctx {
+	struct gsi_evt_ring_props props;
+	enum gsi_evt_ring_state state;
+	uint8_t id;
+	struct gsi_ring_ctx ring;
+	struct mutex mlock;
+	struct completion compl;
+	struct gsi_chan_ctx *chan;
+	atomic_t chan_ref_cnt;
+	union __packed gsi_evt_scratch scratch;
+	struct gsi_evt_stats stats;
+};
+
+struct gsi_ee_scratch {
+	union __packed {
+		struct {
+			uint32_t inter_ee_cmd_return_code:3;
+			uint32_t resvd1:2;
+			uint32_t generic_ee_cmd_return_code:3;
+			uint32_t resvd2:7;
+			uint32_t max_usb_pkt_size:1;
+			uint32_t resvd3:8;
+			uint32_t mhi_base_chan_idx:8;
+		} s;
+		uint32_t val;
+	} word0;
+	uint32_t word1;
+};
+
+struct ch_debug_stats {
+	unsigned long ch_allocate;
+	unsigned long ch_start;
+	unsigned long ch_stop;
+	unsigned long ch_reset;
+	unsigned long ch_de_alloc;
+	unsigned long ch_db_stop;
+	unsigned long cmd_completed;
+};
+
+struct gsi_generic_ee_cmd_debug_stats {
+	unsigned long halt_channel;
+};
+
+struct gsi_coal_chan_info {
+	uint8_t ch_id;
+	uint8_t evchid;
+};
+
+struct gsi_ctx {
+	void __iomem *base;
+	struct device *dev;
+	struct gsi_per_props per;
+	bool per_registered;
+	struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+	struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
+	struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+	struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
+	struct mutex mlock;
+	spinlock_t slock;
+	unsigned long evt_bmap;
+	bool enabled;
+	atomic_t num_chan;
+	atomic_t num_evt_ring;
+	struct gsi_ee_scratch scratch;
+	int num_ch_dp_stats;
+	struct workqueue_struct *dp_stat_wq;
+	u32 max_ch;
+	u32 max_ev;
+	struct completion gen_ee_cmd_compl;
+	void *ipc_logbuf;
+	void *ipc_logbuf_low;
+	struct gsi_coal_chan_info coal_info;
+	/*
+	 * The following used only on emulation systems.
+	 */
+	void __iomem *intcntrlr_base;
+	u32 intcntrlr_mem_size;
+	irq_handler_t intcntrlr_gsi_isr;
+	irq_handler_t intcntrlr_client_isr;
+};
+
+enum gsi_re_type {
+	GSI_RE_XFER = 0x2,
+	GSI_RE_IMMD_CMD = 0x3,
+	GSI_RE_NOP = 0x4,
+	GSI_RE_COAL = 0x8,
+};
+
+struct __packed gsi_tre {
+	uint64_t buffer_ptr;
+	uint16_t buf_len;
+	uint16_t resvd1;
+	uint16_t chain:1;
+	uint16_t resvd4:7;
+	uint16_t ieob:1;
+	uint16_t ieot:1;
+	uint16_t bei:1;
+	uint16_t resvd3:5;
+	uint8_t re_type;
+	uint8_t resvd2;
+};
+
+struct __packed gsi_gci_tre {
+	uint64_t buffer_ptr:41;
+	uint64_t resvd1:7;
+	uint64_t buf_len:16;
+	uint64_t cookie:40;
+	uint64_t resvd2:8;
+	uint64_t re_type:8;
+	uint64_t resvd3:8;
+};
+
+#define GSI_XFER_COMPL_TYPE_GCI 0x28
+
+struct __packed gsi_xfer_compl_evt {
+	union {
+		uint64_t xfer_ptr;
+		struct {
+			uint64_t cookie:40;
+			uint64_t resvd1:24;
+		};
+	};
+	uint16_t len;
+	uint8_t veid;
+	uint8_t code;  /* see gsi_chan_evt */
+	uint16_t resvd;
+	uint8_t type;
+	uint8_t chid;
+};
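+
+/*
+ * All three packed layouts above (gsi_tre, gsi_gci_tre and
+ * gsi_xfer_compl_evt) occupy exactly 16 bytes, matching the 16B ring
+ * element size used for these rings. A compile-time check along these
+ * lines would make that explicit (a sketch, assuming static_assert is
+ * available in the build):
+ *
+ *	static_assert(sizeof(struct gsi_tre) == 16);
+ *	static_assert(sizeof(struct gsi_gci_tre) == 16);
+ *	static_assert(sizeof(struct gsi_xfer_compl_evt) == 16);
+ */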
+
+enum gsi_err_type {
+	GSI_ERR_TYPE_GLOB = 0x1,
+	GSI_ERR_TYPE_CHAN = 0x2,
+	GSI_ERR_TYPE_EVT = 0x3,
+};
+
+enum gsi_err_code {
+	GSI_INVALID_TRE_ERR = 0x1,
+	GSI_OUT_OF_BUFFERS_ERR = 0x2,
+	GSI_OUT_OF_RESOURCES_ERR = 0x3,
+	GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+	GSI_EVT_RING_EMPTY_ERR = 0x5,
+	GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
+	GSI_HWO_1_ERR = 0x8
+};
+
+struct __packed gsi_log_err {
+	uint32_t arg3:4;
+	uint32_t arg2:4;
+	uint32_t arg1:4;
+	uint32_t code:4;
+	uint32_t resvd:3;
+	uint32_t virt_idx:5;
+	uint32_t err_type:4;
+	uint32_t ee:4;
+};
+
+enum gsi_ch_cmd_opcode {
+	GSI_CH_ALLOCATE = 0x0,
+	GSI_CH_START = 0x1,
+	GSI_CH_STOP = 0x2,
+	GSI_CH_RESET = 0x9,
+	GSI_CH_DE_ALLOC = 0xa,
+	GSI_CH_DB_STOP = 0xb,
+};
+
+enum gsi_evt_ch_cmd_opcode {
+	GSI_EVT_ALLOCATE = 0x0,
+	GSI_EVT_RESET = 0x9,
+	GSI_EVT_DE_ALLOC = 0xa,
+};
+
+enum gsi_generic_ee_cmd_opcode {
+	GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
+	GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
+};
+
+enum gsi_generic_ee_cmd_return_code {
+	GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
+	GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
+	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
+	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
+	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
+	GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
+	GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
+};
+
+extern struct gsi_ctx *gsi_ctx;
+void gsi_debugfs_init(void);
+uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
+void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
+
+#endif

+ 744 - 0
gsi/gsi_dbg.c

@@ -0,0 +1,744 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/random.h>
+#include <linux/uaccess.h>
+#include <linux/msm_gsi.h>
+#include "gsi_reg.h"
+#include "gsi.h"
+
+#define TERR(fmt, args...) \
+		pr_err("%s:%d " fmt, __func__, __LINE__, ## args)
+#define TDBG(fmt, args...) \
+		pr_debug("%s:%d " fmt, __func__, __LINE__, ## args)
+#define PRT_STAT(fmt, args...) \
+		pr_err(fmt, ## args)
+
+static struct dentry *dent;
+static char dbg_buff[4096];
+static void *gsi_ipc_logbuf_low;
+
+static void gsi_wq_print_dp_stats(struct work_struct *work);
+static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
+static void gsi_wq_update_dp_stats(struct work_struct *work);
+static DECLARE_DELAYED_WORK(gsi_update_dp_stats_work, gsi_wq_update_dp_stats);
+
+static ssize_t gsi_dump_evt(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 arg1;
+	u32 arg2;
+	unsigned long missing;
+	char *sptr, *token;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+	uint16_t i;
+
+	if (count >= sizeof(dbg_buff))
+		return -EINVAL;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg1))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg2))
+		return -EINVAL;
+
+	TDBG("arg1=%u arg2=%u\n", arg1, arg2);
+
+	if (arg1 >= gsi_ctx->max_ev) {
+		TERR("invalid evt ring id %u\n", arg1);
+		return -EINVAL;
+	}
+
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX3  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX4  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX5  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX6  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX7  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX8  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX9  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX10 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX11 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX12 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX13 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d SCR0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d SCR1  0x%x\n", arg1, val);
+
+	if (arg2) {
+		ctx = &gsi_ctx->evtr[arg1];
+
+		if (ctx->props.ring_base_vaddr) {
+			for (i = 0; i < ctx->props.ring_len / 16; i++)
+				TERR("EV%2d (0x%08llx) %08x %08x %08x %08x\n",
+				arg1, ctx->props.ring_base_addr + i * 16,
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 0),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 4),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 8),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 12));
+		} else {
+			TERR("No VA supplied for event ring id %u\n", arg1);
+		}
+	}
+
+	return count;
+}
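+
+/*
+ * Typical invocation, assuming debugfs is mounted at /sys/kernel/debug:
+ *
+ *	echo "0 1" > /sys/kernel/debug/gsi/ev_dump
+ *
+ * dumps the context registers of event ring 0 and, since the second
+ * argument is non-zero, the ring contents as well (the latter requires a
+ * virtual address to have been supplied in the ring properties).
+ */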
+
+static ssize_t gsi_dump_ch(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 arg1;
+	u32 arg2;
+	unsigned long missing;
+	char *sptr, *token;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+	uint16_t i;
+
+	if (count >= sizeof(dbg_buff))
+		return -EINVAL;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg1))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg2))
+		return -EINVAL;
+
+	TDBG("arg1=%u arg2=%u\n", arg1, arg2);
+
+	if (arg1 >= gsi_ctx->max_ch) {
+		TERR("invalid chan id %u\n", arg1);
+		return -EINVAL;
+	}
+
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX3  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX4  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX5  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX6  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX7  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(arg1,
+			gsi_ctx->per.ee));
+	TERR("CH%2d REFRP 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1,
+			gsi_ctx->per.ee));
+	TERR("CH%2d REFWP 0x%x\n", arg1, val);
+	if (gsi_ctx->per.ver >= GSI_VER_2_5) {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee));
+	} else {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee));
+	}
+	TERR("CH%2d QOS   0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR3  0x%x\n", arg1, val);
+
+	if (arg2) {
+		ctx = &gsi_ctx->chan[arg1];
+
+		if (ctx->props.ring_base_vaddr) {
+			for (i = 0; i < ctx->props.ring_len / 16; i++)
+				TERR("CH%2d (0x%08llx) %08x %08x %08x %08x\n",
+				arg1, ctx->props.ring_base_addr + i * 16,
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 0),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 4),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 8),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 12));
+		} else {
+			TERR("No VA supplied for chan id %u\n", arg1);
+		}
+	}
+
+	return count;
+}
+
+static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
+{
+	if (!ctx->allocated)
+		return;
+
+	PRT_STAT("CH%2d:\n", ctx->props.ch_id);
+	PRT_STAT("queued=%lu compl=%lu\n",
+		ctx->stats.queued,
+		ctx->stats.completed);
+	PRT_STAT("cb->poll=%lu poll->cb=%lu poll_pend_irq=%lu\n",
+		ctx->stats.callback_to_poll,
+		ctx->stats.poll_to_callback,
+		ctx->stats.poll_pending_irq);
+	PRT_STAT("invalid_tre_error=%lu\n",
+		ctx->stats.invalid_tre_error);
+	PRT_STAT("poll_ok=%lu poll_empty=%lu\n",
+		ctx->stats.poll_ok, ctx->stats.poll_empty);
+	if (ctx->evtr)
+		PRT_STAT("compl_evt=%lu\n",
+			ctx->evtr->stats.completed);
+	PRT_STAT("userdata_in_use=%lu\n", ctx->stats.userdata_in_use);
+
+	PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo);
+	PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi);
+	PRT_STAT("ch_above_hi=%lu\n", ctx->stats.dp.ch_above_hi);
+	PRT_STAT("time_empty=%lums\n", ctx->stats.dp.empty_time);
+	PRT_STAT("\n");
+}
+
+static ssize_t gsi_dump_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	int min, max, ret;
+
+	ret = kstrtos32_from_user(buf, count, 0, &ch_id);
+	if (ret)
+		return ret;
+
+	if (ch_id == -1) {
+		min = 0;
+		max = gsi_ctx->max_ch;
+	} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+		   !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	} else {
+		min = ch_id;
+		max = ch_id + 1;
+	}
+
+	for (ch_id = min; ch_id < max; ch_id++)
+		gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
+
+	return count;
+error:
+	TERR("Usage: echo ch_id > stats. Use -1 for all\n");
+	return -EINVAL;
+}
+
+static int gsi_dbg_create_stats_wq(void)
+{
+	gsi_ctx->dp_stat_wq =
+		create_singlethread_workqueue("gsi_stat");
+	if (!gsi_ctx->dp_stat_wq) {
+		TERR("failed create workqueue\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void gsi_dbg_destroy_stats_wq(void)
+{
+	cancel_delayed_work_sync(&gsi_update_dp_stats_work);
+	cancel_delayed_work_sync(&gsi_print_dp_stats_work);
+	flush_workqueue(gsi_ctx->dp_stat_wq);
+	destroy_workqueue(gsi_ctx->dp_stat_wq);
+	gsi_ctx->dp_stat_wq = NULL;
+}
+
+static ssize_t gsi_enable_dp_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	bool enable;
+	int ret;
+
+	if (count >= sizeof(dbg_buff))
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
+		goto error;
+
+	enable = (dbg_buff[0] == '+');
+
+	if (kstrtos32(dbg_buff + 1, 0, &ch_id))
+		goto error;
+
+	if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+	    !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	}
+
+	if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
+		TERR("ch_%d: already enabled/disabled\n", ch_id);
+		return -EINVAL;
+	}
+	gsi_ctx->chan[ch_id].enable_dp_stats = enable;
+
+	if (enable)
+		gsi_ctx->num_ch_dp_stats++;
+	else
+		gsi_ctx->num_ch_dp_stats--;
+
+	if (enable) {
+		if (gsi_ctx->num_ch_dp_stats == 1) {
+			ret = gsi_dbg_create_stats_wq();
+			if (ret)
+				return ret;
+		}
+		cancel_delayed_work_sync(&gsi_update_dp_stats_work);
+		queue_delayed_work(gsi_ctx->dp_stat_wq,
+			&gsi_update_dp_stats_work, msecs_to_jiffies(10));
+	} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
+		gsi_dbg_destroy_stats_wq();
+	}
+
+	return count;
+error:
+	TERR("Usage: echo [+-]ch_id > enable_dp_stats\n");
+	return -EINVAL;
+}
+
+static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 ch_id;
+	u32 max_elem;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (count >= sizeof(dbg_buff))
+		goto error;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token) {
+		TERR("\n");
+		goto error;
+	}
+
+	if (kstrtou32(token, 0, &ch_id)) {
+		TERR("\n");
+		goto error;
+	}
+
+	token = strsep(&sptr, " ");
+	if (!token) {
+		/* get */
+		if (kstrtou32(dbg_buff, 0, &ch_id))
+			goto error;
+		if (ch_id >= gsi_ctx->max_ch)
+			goto error;
+		PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
+			gsi_ctx->chan[ch_id].props.max_re_expected);
+		return count;
+	}
+	if (kstrtou32(token, 0, &max_elem)) {
+		TERR("\n");
+		goto error;
+	}
+
+	TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
+
+	if (ch_id >= gsi_ctx->max_ch) {
+		TERR("invalid chan id %u\n", ch_id);
+		goto error;
+	}
+
+	gsi_ctx->chan[ch_id].props.max_re_expected = max_elem;
+
+	return count;
+
+error:
+	TERR("Usage: (set) echo <ch_id> <max_elem> > max_elem_dp_stats\n");
+	TERR("Usage: (get) echo <ch_id> > max_elem_dp_stats\n");
+	return -EINVAL;
+}
+
+static void gsi_wq_print_dp_stats(struct work_struct *work)
+{
+	int ch_id;
+
+	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
+		if (gsi_ctx->chan[ch_id].print_dp_stats)
+			gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
+	}
+
+	queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_print_dp_stats_work,
+		msecs_to_jiffies(1000));
+}
+
+static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx)
+{
+	uint16_t start_hw;
+	uint16_t end_hw;
+	uint64_t rp_hw;
+	uint64_t wp_hw;
+	int ee = gsi_ctx->per.ee;
+	uint16_t used_hw;
+
+	rp_hw = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
+		<< 32;
+
+	wp_hw = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+	wp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee)))
+		<< 32;
+
+	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
+	end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw);
+
+	if (end_hw >= start_hw)
+		used_hw = end_hw - start_hw;
+	else
+		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
+
+	TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw);
+	gsi_update_ch_dp_stats(ctx, used_hw);
+}
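+
+/*
+ * Worked example for the wrap-around branch above: with a 512-element
+ * ring (max_num_elem == 511), start_hw == 500 and end_hw == 10 give
+ * used_hw = 511 + 1 - (500 - 10) = 22 elements in flight.
+ */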
+
+static void gsi_wq_update_dp_stats(struct work_struct *work)
+{
+	int ch_id;
+
+	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
+		if (gsi_ctx->chan[ch_id].allocated &&
+		    gsi_ctx->chan[ch_id].enable_dp_stats)
+			gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
+	}
+
+	queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_update_dp_stats_work,
+		msecs_to_jiffies(10));
+}
+
+static ssize_t gsi_rst_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	int min, max, ret;
+
+	ret = kstrtos32_from_user(buf, count, 0, &ch_id);
+	if (ret)
+		return ret;
+
+	if (ch_id == -1) {
+		min = 0;
+		max = gsi_ctx->max_ch;
+	} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+		   !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	} else {
+		min = ch_id;
+		max = ch_id + 1;
+	}
+
+	for (ch_id = min; ch_id < max; ch_id++)
+		memset(&gsi_ctx->chan[ch_id].stats, 0,
+			sizeof(gsi_ctx->chan[ch_id].stats));
+
+	return count;
+error:
+	TERR("Usage: echo ch_id > rst_stats. Use -1 for all\n");
+	return -EINVAL;
+}
+
+static ssize_t gsi_print_dp_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	bool enable;
+	int ret;
+
+	if (count >= sizeof(dbg_buff))
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
+		goto error;
+
+	enable = (dbg_buff[0] == '+');
+
+	if (kstrtos32(dbg_buff + 1, 0, &ch_id))
+		goto error;
+
+	if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+	    !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	}
+
+	if (gsi_ctx->chan[ch_id].print_dp_stats == enable) {
+		TERR("ch_%d: already enabled/disabled\n", ch_id);
+		return -EINVAL;
+	}
+	gsi_ctx->chan[ch_id].print_dp_stats = enable;
+
+	if (enable)
+		gsi_ctx->num_ch_dp_stats++;
+	else
+		gsi_ctx->num_ch_dp_stats--;
+
+	if (enable) {
+		if (gsi_ctx->num_ch_dp_stats == 1) {
+			ret = gsi_dbg_create_stats_wq();
+			if (ret)
+				return ret;
+		}
+		cancel_delayed_work_sync(&gsi_print_dp_stats_work);
+		queue_delayed_work(gsi_ctx->dp_stat_wq,
+			&gsi_print_dp_stats_work, msecs_to_jiffies(10));
+	} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
+		gsi_dbg_destroy_stats_wq();
+	}
+
+	return count;
+error:
+	TERR("Usage: echo [+-]ch_id > print_dp_stats\n");
+	return -EINVAL;
+}
+
+static ssize_t gsi_enable_ipc_low(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 option = 0;
+	int ret;
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &option);
+	if (ret)
+		return ret;
+
+	mutex_lock(&gsi_ctx->mlock);
+	if (option) {
+		if (!gsi_ipc_logbuf_low) {
+			gsi_ipc_logbuf_low =
+				ipc_log_context_create(GSI_IPC_LOG_PAGES,
+					"gsi_low", 0);
+			if (gsi_ipc_logbuf_low == NULL)
+				TERR("failed to get ipc_logbuf_low\n");
+		}
+		gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low;
+	} else {
+		gsi_ctx->ipc_logbuf_low = NULL;
+	}
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return count;
+}
+
+static const struct file_operations gsi_ev_dump_ops = {
+	.write = gsi_dump_evt,
+};
+
+static const struct file_operations gsi_ch_dump_ops = {
+	.write = gsi_dump_ch,
+};
+
+static const struct file_operations gsi_stats_ops = {
+	.write = gsi_dump_stats,
+};
+
+static const struct file_operations gsi_enable_dp_stats_ops = {
+	.write = gsi_enable_dp_stats,
+};
+
+static const struct file_operations gsi_max_elem_dp_stats_ops = {
+	.write = gsi_set_max_elem_dp_stats,
+};
+
+static const struct file_operations gsi_rst_stats_ops = {
+	.write = gsi_rst_stats,
+};
+
+static const struct file_operations gsi_print_dp_stats_ops = {
+	.write = gsi_print_dp_stats,
+};
+
+static const struct file_operations gsi_ipc_low_ops = {
+	.write = gsi_enable_ipc_low,
+};
+
+void gsi_debugfs_init(void)
+{
+	static struct dentry *dfile;
+	const mode_t write_only_mode = 0220;
+
+	dent = debugfs_create_dir("gsi", 0);
+	if (IS_ERR(dent)) {
+		TERR("fail to create dir\n");
+		return;
+	}
+
+	dfile = debugfs_create_file("ev_dump", write_only_mode,
+			dent, 0, &gsi_ev_dump_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create ev_dump file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("ch_dump", write_only_mode,
+			dent, 0, &gsi_ch_dump_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create ch_dump file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("stats", write_only_mode, dent,
+			0, &gsi_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("enable_dp_stats", write_only_mode, dent,
+			0, &gsi_enable_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("max_elem_dp_stats", write_only_mode,
+		dent, 0, &gsi_max_elem_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("rst_stats", write_only_mode,
+		dent, 0, &gsi_rst_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("print_dp_stats",
+		write_only_mode, dent, 0, &gsi_print_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("ipc_low", write_only_mode,
+		dent, 0, &gsi_ipc_low_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("could not create ipc_low\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+

+ 227 - 0
gsi/gsi_emulation.c

@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "gsi_emulation.h"
+
+/*
+ * *****************************************************************************
+ * The following is used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	uint32_t val, ver, intrCnt, rangeCnt, range;
+
+	val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT);
+
+	intrCnt  = val & 0xFFFF;
+	ver      = (val >> 16) & 0xFFFF;
+	rangeCnt = intrCnt / 32;
+
+	GSIDBG(
+	    "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n",
+	    val, intrCnt, ver, rangeCnt);
+
+	/*
+	 * Verify the interrupt controller version
+	 */
+	if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) {
+		GSIERR(
+		  "Error: invalid interrupt controller version 0x%x\n",
+		  ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	/*
+	 * Verify the interrupt count
+	 *
+	 * NOTE: intrCnt must be at least one block and a multiple of 32
+	 */
+	if ((intrCnt % 32) != 0) {
+		GSIERR(
+		  "Invalid interrupt count read from HW 0x%04x\n",
+		  intrCnt);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Verify the number of ranges used; each range handles 32 int lines
+	 */
+	if (rangeCnt > DEO_IC_MAX_RANGE_CNT) {
+		GSIERR(
+		  "SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n",
+		  rangeCnt,
+		  DEO_IC_MAX_RANGE_CNT);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * Let's take the last register offset minus the first
+	 * register offset (ie. range) and compare it to the interrupt
+	 * controller's dtsi defined memory size.  The range better
+	 * fit within the size.
+	 */
+	val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT;
+	if (val > intcntrlr_mem_size) {
+		GSIERR(
+		    "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n",
+		    val, intcntrlr_mem_size);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/*
+	 * The following will disable the emulators interrupt controller,
+	 * so that we can config it...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+		0x0,
+		intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	/*
+	 * Init register maps of all ranges
+	 */
+	for (range = 0; range < rangeCnt; range++) {
+		/*
+		 * Disable all int sources by setting all enable clear bits
+		 */
+		GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range));
+
+		/*
+		 * Clear all raw statuses
+		 */
+		GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range);
+		gsi_emu_writel(
+		    0xFFFFFFFF,
+		    intcntrlr_base + GE_INT_CLEAR_n(range));
+
+		/*
+		 * Init all int types
+		 */
+		GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range);
+		gsi_emu_writel(
+		    0x0,
+		    intcntrlr_base + GE_INT_TYPE_n(range));
+	}
+
+	/*
+	 * The following tells the interrupt controller to interrupt us
+	 * when it sees interrupts from ipa and/or gsi.
+	 *
+	 * Interrupts:
+	 * ===================================================================
+	 * DUT0                       [  63 :   16 ]
+	 * ipa_irq                                        [ 3 : 0 ] <---HERE
+	 * ipa_gsi_bam_irq                                [ 7 : 4 ] <---HERE
+	 * ipa_bam_apu_sec_error_irq                      [ 8 ]
+	 * ipa_bam_apu_non_sec_error_irq                  [ 9 ]
+	 * ipa_bam_xpu2_msa_intr                          [ 10 ]
+	 * ipa_vmidmt_nsgcfgirpt                          [ 11 ]
+	 * ipa_vmidmt_nsgirpt                             [ 12 ]
+	 * ipa_vmidmt_gcfgirpt                            [ 13 ]
+	 * ipa_vmidmt_girpt                               [ 14 ]
+	 * bam_xpu3_qad_non_secure_intr_sp                [ 15 ]
+	 */
+	GSIDBG("Writing GE_INT_ENABLE_n(0)\n");
+	gsi_emu_writel(
+	    0x00FF, /* See <---HERE above */
+	    intcntrlr_base + GE_INT_ENABLE_n(0));
+
+	/*
+	 * The following will enable the IC post config...
+	 */
+	GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
+	gsi_emu_writel(
+	    0x1,
+	    intcntrlr_base + GE_INT_MASTER_ENABLE);
+
+	return 0;
+}
+
+/*
+ * *****************************************************************************
+ * The following is the EMULATION hard irq handler...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	uint32_t val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS);
+
+	/*
+	 * Bit zero set means the interrupt is for us; when it is not set,
+	 * return IRQ_NONE...
+	 */
+	if (!(val & 0x00000001))
+		return IRQ_NONE;
+
+	/*
+	 * The following will mask (ie. turn off) future interrupts from
+	 * the emulator's interrupt controller. It will stay this way until
+	 * we turn them back on, which will be done in the bottom half
+	 * (ie. emulator_soft_irq_isr)...
+	 */
+	gsi_emu_writel(
+		0x0,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
+
+	irqreturn_t retVal = IRQ_HANDLED;
+	uint32_t	val;
+
+	val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0));
+
+	GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val);
+
+	if ((val & 0xF0) && gsi_ctx_ptr->intcntrlr_gsi_isr) {
+		GSIDBG("Got gsi interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt);
+	}
+
+	if ((val & 0x0F) && gsi_ctx_ptr->intcntrlr_client_isr) {
+		GSIDBG("Got ipa interrupt\n");
+		retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, NULL);
+	}
+
+	/*
+	 * The following will clear the interrupts...
+	 */
+	gsi_emu_writel(
+		0xFFFFFFFF,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0));
+
+	/*
+	 * The following will unmask (ie. turn on) future interrupts from
+	 * the emulator's interrupt controller...
+	 */
+	gsi_emu_writel(
+		0x1,
+		gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
+
+	return retVal;
+}
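The hard/soft ISR pair above follows the kernel's standard top-half/bottom-half split: the hard ISR masks the emulated controller and returns IRQ_WAKE_THREAD, and the threaded handler dispatches, clears, and unmasks. A minimal registration sketch under that assumption (illustrative only; the IRQF flags and the "gsi-emu" name are not taken from this commit):

#include <linux/interrupt.h>

/* Hypothetical helper: wire the ISR pair above to one IRQ line. */
static int gsi_emu_request_irq(struct gsi_ctx *ctx, unsigned int irq)
{
	/*
	 * request_threaded_irq() installs emulator_hard_irq_isr as the
	 * hard-irq (top half) handler and emulator_soft_irq_isr as the
	 * threaded (bottom half) handler; ctx comes back as ctxt.
	 */
	return request_threaded_irq(irq,
				    emulator_hard_irq_isr,
				    emulator_soft_irq_isr,
				    IRQF_SHARED,
				    "gsi-emu",
				    ctx);
}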

+ 189 - 0
gsi/gsi_emulation.h

@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_GSI_EMULATION_H_)
+# define _GSI_EMULATION_H_
+
+# include <linux/interrupt.h>
+
+# include "gsi.h"
+# include "gsi_reg.h"
+
+#if defined(CONFIG_IPA_EMULATION)
+# include "gsi_emulation_stubs.h"
+#endif
+
+# define gsi_emu_readl(c)     (readl_relaxed(c))
+# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
+
+# define CNTRLR_BASE 0
+
+/*
+ * The following file contains definitions and declarations that are
+ * germane only to the IPA emulation system, which is run from an X86
+ * environment.  Declarations for non-X86 (ie. arm) are merely stubs
+ * to facilitate compilation and linking.
+ *
+ * Interrupt controller registers.
+ * Descriptions taken from the EMULATION interrupt controller SWI.
+ * - There is only one Master Enable register
+ * - Each group of 32 interrupt lines (range) is controlled by 8 registers,
+ *   which are consecutive in memory:
+ *      GE_INT_ENABLE_n
+ *      GE_INT_ENABLE_CLEAR_n
+ *      GE_INT_ENABLE_SET_n
+ *      GE_INT_TYPE_n
+ *      GE_IRQ_STATUS_n
+ *      GE_RAW_STATUS_n
+ *      GE_INT_CLEAR_n
+ *      GE_SOFT_INT_n
+ * - After the above 8 registers, there are the registers of the next
+ *   group (range) of 32 interrupt lines, and so on.
+ */
+
+/** @brief The interrupt controller version and interrupt count register.
+ *         Specifies interrupt controller version (upper 16 bits) and the
+ *         number of interrupt lines supported by HW (lower 16 bits).
+ */
+# define GE_INT_CTL_VER_CNT              \
+	(CNTRLR_BASE + 0x0000)
+
+/** @brief Enable or disable physical IRQ output signal to the system,
+ *         not affecting any status registers.
+ *
+ *         0x0 : DISABLE IRQ output disabled
+ *         0x1 : ENABLE  IRQ output enabled
+ */
+# define GE_INT_OUT_ENABLE               \
+	(CNTRLR_BASE + 0x0004)
+
+/** @brief The IRQ master enable register.
+ *         Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable.
+ */
+# define GE_INT_MASTER_ENABLE            \
+	(CNTRLR_BASE + 0x0008)
+
+# define GE_INT_MASTER_STATUS            \
+	(CNTRLR_BASE + 0x000C)
+
+/** @brief Each bit disables (bit=0, default) or enables (bit=1) the
+ *         corresponding interrupt source
+ */
+# define GE_INT_ENABLE_n(n)              \
+	(CNTRLR_BASE + 0x0010 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_CLEAR_n(n)        \
+	(CNTRLR_BASE + 0x0014 + 0x20 * (n))
+
+/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_ENABLE_SET_n(n)          \
+	(CNTRLR_BASE + 0x0018 + 0x20 * (n))
+
+/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input
+ *         detection logic for each corresponding interrupt source
+ */
+# define GE_INT_TYPE_n(n)                \
+	(CNTRLR_BASE + 0x001C + 0x20 * (n))
+
+/** @brief Shows the interrupt sources captured in RAW_STATUS that have been
+ *         steered to irq_n by INT_SELECT. Interrupts must also be enabled by
+ *         INT_ENABLE and MASTER_ENABLE. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_IRQ_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0020 + 0x20 * (n))
+
+/** @brief Shows the interrupt sources that have been latched by the input
+ *         logic of the Interrupt Controller. Read only register.
+ *         Bit values: 1=active, 0=inactive
+ */
+# define GE_RAW_STATUS_n(n)                      \
+	(CNTRLR_BASE + 0x0024 + 0x20 * (n))
+
+/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0
+ */
+# define GE_INT_CLEAR_n(n)               \
+	(CNTRLR_BASE + 0x0028 + 0x20 * (n))
+
+/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS.
+ *         Does nothing for bit=0.
+ *  @note  Only functional for edge detected interrupts
+ */
+# define GE_SOFT_INT_n(n)                        \
+	(CNTRLR_BASE + 0x002C + 0x20 * (n))
+
+/** @brief Maximum number of ranges in SW. Each range supports 32 interrupt
+ *         lines. If HW is extended considerably, increase this value
+ */
+# define DEO_IC_MAX_RANGE_CNT            8
+
+/** @brief Size of the registers of one range in memory, in bytes */
+# define DEO_IC_RANGE_MEM_SIZE           32  /* SWI: 8 registers, no gaps */
+
+/** @brief Minimum supported interrupt controller HW version */
+# define DEO_IC_INT_CTL_VER_MIN          0x0102
+
+#if defined(CONFIG_IPA_EMULATION) /* declarations to follow */
+
+/*
+ * *****************************************************************************
+ * The following used to set up the EMULATION interrupt controller...
+ * *****************************************************************************
+ */
+int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION hard irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt);
+
+/*
+ * *****************************************************************************
+ * The following for EMULATION soft irq...
+ * *****************************************************************************
+ */
+irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt);
+
+# else /* #if !defined(CONFIG_IPA_EMULATION) then definitions to follow */
+
+static inline int setup_emulator_cntrlr(
+	void __iomem *intcntrlr_base,
+	u32           intcntrlr_mem_size)
+{
+	return 0;
+}
+
+static inline irqreturn_t emulator_hard_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_NONE;
+}
+
+static inline irqreturn_t emulator_soft_irq_isr(
+	int   irq,
+	void *ctxt)
+{
+	return IRQ_HANDLED;
+}
+
+# endif /* #if defined(CONFIG_IPA_EMULATION) */
+
+#endif /* #if !defined(_GSI_EMULATION_H_) */
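Given the layout above (eight registers per 32-line range, with GE_INT_ENABLE_SET_n at stride 0x20), touching a single interrupt line reduces to a range index and a bit mask. A minimal sketch under those assumptions (the helper name is hypothetical, not part of this commit):

/* Hypothetical helper: enable one interrupt line in the emulated IC. */
static inline void gsi_emu_enable_line(void __iomem *base, u32 line)
{
	u32 range = line / 32;          /* which 32-line range */
	u32 mask = 1U << (line % 32);   /* bit within that range */

	/* ENABLE_SET: 1-bits set the matching INT_ENABLE bits */
	gsi_emu_writel(mask, base + GE_INT_ENABLE_SET_n(range));
}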

+ 12 - 0
gsi/gsi_emulation_stubs.h

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_GSI_EMULATION_STUBS_H_)
+# define _GSI_EMULATION_STUBS_H_
+
+# include <asm/barrier.h>
+# define __iowmb()       wmb() /* used in gsi.h */
+
+#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */

+ 30 - 0
gsi/gsi_reg.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __GSI_REG_H__
+#define __GSI_REG_H__
+
+enum gsi_register_ver {
+	GSI_REGISTER_VER_1 = 0,
+	GSI_REGISTER_VER_2 = 1,
+	GSI_REGISTER_MAX,
+};
+
+#ifdef GSI_REGISTER_VER_CURRENT
+#error GSI_REGISTER_VER_CURRENT already defined
+#endif
+
+#ifdef CONFIG_GSI_REGISTER_VERSION_2
+#include "gsi_reg_v2.h"
+#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_2
+#endif
+
+/* The default is V1 */
+#ifndef GSI_REGISTER_VER_CURRENT
+#include "gsi_reg_v1.h"
+#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_1
+#endif
+
+#endif /* __GSI_REG_H__ */
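Because the Kconfig choice resolves GSI_REGISTER_VER_CURRENT at compile time, callers can treat it as an ordinary constant, and the enum doubles as an array index. A small sketch (illustrative only, not from this commit):

/* Hypothetical: map the compile-time register version to a name. */
static const char * const gsi_reg_ver_name[GSI_REGISTER_MAX] = {
	[GSI_REGISTER_VER_1] = "v1",
	[GSI_REGISTER_VER_2] = "v2",
};

static inline const char *gsi_current_reg_ver(void)
{
	return gsi_reg_ver_name[GSI_REGISTER_VER_CURRENT];
}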

+ 1098 - 0
gsi/gsi_reg_v1.h

@@ -0,0 +1,1098 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __GSI_REG_V1_H__
+#define __GSI_REG_V1_H__
+
+#define GSI_GSI_REG_BASE_OFFS 0
+
+#define GSI_GSI_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000000)
+#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+
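Every field in this header follows the same _BMSK/_SHFT convention, so a field read or read-modify-write reduces to the same two expressions throughout. A sketch using the MCS_ENABLE field of GSI_GSI_CFG (the helper names are hypothetical, not part of this commit):

/* Hypothetical accessors for one _BMSK/_SHFT field pair. */
static inline u32 gsi_cfg_get_mcs_enable(u32 reg)
{
	return (reg & GSI_GSI_CFG_MCS_ENABLE_BMSK) >>
		GSI_GSI_CFG_MCS_ENABLE_SHFT;
}

static inline u32 gsi_cfg_set_mcs_enable(u32 reg, u32 val)
{
	reg &= ~GSI_GSI_CFG_MCS_ENABLE_BMSK;
	reg |= (val << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
	       GSI_GSI_CFG_MCS_ENABLE_BMSK;
	return reg;
}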
+#define GSI_GSI_MCS_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000018)
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000001c)
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a0)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a4)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a8)
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ac)
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b0)
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b4)
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b8)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000bc)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c0)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c4)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c8)
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000cc)
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d0)
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d4)
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d8)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000dc)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e0)
+#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e4)
+#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e8)
+#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ec)
+#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f0)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f4)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000400)
+#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000404)
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000408)
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_RMSK 0xfff
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_BMSK 0xfff
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000418)
+#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000041c)
+#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000420)
+#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000424)
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000428)
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000042c)
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000430)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000434)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000438)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000440)
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000444)
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000448)
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+
+/* The real H/W register name spells STOPPED with a single P (STOPED) */
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000044c)
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4
+#define GSI_GSI_INST_RAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n))
+#define GSI_V2_5_GSI_INST_RAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001b000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n))
+#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff
+#define GSI_GSI_INST_RAM_n_MAXn 4095
+#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143
+#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095
+#define GSI_V2_5_GSI_INST_RAM_n_MAXn 8191
+#define GSI_V2_7_GSI_INST_RAM_n_MAXn 5119
+
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000
+#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+
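As a worked example of the indexing above: the per-channel context registers use a 0x4000 stride per execution environment (n) and a 0x80 stride per channel (k), so CNTXT_0 of channel 3 in EE 0 is GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(3, 0) = 0x0001c000 + 0x4000 * 0 + 0x80 * 3 = 0x0001c180.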
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c00c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c01c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c054 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c058 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303
+#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30
+#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK 0x400
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT 0xa
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c060 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c064 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c068 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c06c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d00c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d01c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d020 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d024 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d028 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d02c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d030 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d034 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d048 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d04c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e000 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e004 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e100 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e104 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_STATUS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f000 + 0x4000 * (n))
+#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f008 + 0x4000 * (n))
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f010 + 0x4000 * (n))
+#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
+
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+		(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+		(GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5
+
+#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10
+#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff
+#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f080 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f088 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f090 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f094 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f098 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x1ffff
+#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x7fffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f09c + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff
+#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0a0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0a4 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0b0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0b8 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff
+#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0c0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f100 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f108 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f110 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f118 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f120 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f128 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f188 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f18c + 0x4000 * (n))
+
+#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f180 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f200 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f210 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f400 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + 0x4 * (k))
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
+
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+			(GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xe
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5
+
+#endif /* __GSI_REG_V1_H__ */
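Editor's note: each HW_PARAM_2 capability field above is recovered by masking, then shifting. A minimal sketch for a v2.7 core follows, assuming the caller has already read the raw register value (`hw_param2` is just that 32-bit value; the helper name is illustrative, not from this header).

#include <linux/types.h>

/* Sketch: unpack per-EE channel/event counts from a raw
 * EE_n_GSI_HW_PARAM_2 value on a GSI v2.7 core. The register read
 * itself is left to the caller.
 */
static void gsi_v2_7_decode_hw_param2(u32 hw_param2,
				      u32 *num_ch, u32 *num_ev)
{
	*num_ch = (hw_param2 &
		GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
		GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
	*num_ev = (hw_param2 &
		GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
		GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
}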

1157 - 0
gsi/gsi_reg_v2.h

@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __GSI_REG_V2_H__
+#define __GSI_REG_V2_H__
+
+#define GSI_GSI_REG_BASE_OFFS 0
+
+#define GSI_GSI_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000000)
+#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+
+#define GSI_GSI_MCS_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000018)
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000001c)
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a0)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a4)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a8)
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ac)
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b0)
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b4)
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b8)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000bc)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c0)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c4)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c8)
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000cc)
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d0)
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d4)
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d8)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000dc)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e0)
+#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e4)
+#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e8)
+#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ec)
+#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f0)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f4)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000400)
+#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000404)
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000408)
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_RMSK 0xfff
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_BMSK 0xfff
+#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000418)
+#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000041c)
+#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000420)
+#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000424)
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000428)
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000042c)
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000430)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000434)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000438)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000440)
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000444)
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000448)
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+
+/* The actual H/W register name spells STOPPED with a single P */
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000044c)
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4
+#define GSI_GSI_INST_RAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n))
+#define GSI_V2_5_GSI_INST_RAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001b000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n))
+#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff
+#define GSI_GSI_INST_RAM_n_MAXn 4095
+#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143
+#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095
+#define GSI_V2_5_GSI_INST_RAM_n_MAXn 8191
+#define GSI_V2_7_GSI_INST_RAM_n_MAXn 5119
+#define GSI_V2_9_GSI_INST_RAM_n_MAXn 6143
+
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
+
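Editor's note: the MAXn constants above give the last valid instruction RAM word index per GSI version, so the RAM footprint follows directly. A sketch of the arithmetic for v2.5 (helper name is illustrative):

#include <linux/types.h>

/* Sketch: byte size of the v2.5 instruction RAM. MAXn is the last
 * word index, so the word count is MAXn + 1.
 */
static inline size_t gsi_v2_5_inst_ram_size(void)
{
	/* (8191 + 1) words * 4 bytes = 32 KB */
	return (GSI_V2_5_GSI_INST_RAM_n_MAXn + 1) *
		GSI_GSI_INST_RAM_n_WORD_SZ;
}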
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000
+#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xfffff
+#define GSI_V2_9_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f00c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f01c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
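Editor's note: the CHSTATE field of CNTXT_0 is the usual way to poll a channel's state machine. A minimal sketch, again assuming a hypothetical `gsi_base` iomem pointer:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: read the state field of channel `ch` in EE `ee`. The return
 * value is the raw 4-bit CHSTATE encoding; the state enum lives in
 * the driver, not in this header.
 */
static u32 gsi_read_chstate(void __iomem *gsi_base,
			    unsigned int ch, unsigned int ee)
{
	u32 ctx0 = readl_relaxed(gsi_base +
			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(ch, ee));

	return (ctx0 & GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
}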
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f054 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f058 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303
+#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30
+#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK 0x400
+#define GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT 0xa
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_BMSK 0x1000000
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_DB_IN_BYTES_SHFT 0x18
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_V2_9_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f060 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f064 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f068 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000f06c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xfffff
+#define GSI_V2_9_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001000c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001001c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010020 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
+
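Editor's note: CNTXT_8 packs the event ring's interrupt moderation timer and counter into one word. A sketch of composing the value, where `modt`/`modc` are caller-chosen moderation settings (their units and limits come from the driver, not this header):

#include <linux/types.h>

/* Sketch: build an EV_CH_k_CNTXT_8 value from a moderation timer
 * (`modt`) and counter (`modc`). Each input is masked to its field
 * width; the INT_MOD_CNT field is left at zero here, on the
 * assumption that hardware maintains it.
 */
static u32 gsi_build_ev_cntxt_8(u32 modt, u32 modc)
{
	u32 val;

	val = (modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK;
	val |= (modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK;
	return val;
}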
+#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010024 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010028 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001002c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010030 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010034 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00010048 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001004c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00011000 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00011004 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00011100 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00011104 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
+
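Editor's note: the 64-bit ring write pointer is split across two 32-bit doorbell registers, so the halves must be written as a pair. A minimal sketch, assuming a hypothetical `gsi_base` and assuming the MSB-then-LSB order used here is what latches the doorbell (the driver is authoritative on the ordering):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: ring the doorbell of channel `ch` in EE `ee` with a 64-bit
 * write pointer. The MSB half is written first so that the LSB write
 * presents a consistent 64-bit value (assumed ordering).
 */
static void gsi_ring_ch_doorbell(void __iomem *gsi_base, unsigned int ch,
				 unsigned int ee, u64 wp)
{
	writel_relaxed(upper_32_bits(wp), gsi_base +
			GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ch, ee));
	writel_relaxed(lower_32_bits(wp), gsi_base +
			GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ch, ee));
}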
+#define GSI_EE_n_GSI_STATUS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012000 + 0x4000 * (n))
+#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012008 + 0x4000 * (n))
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012010 + 0x4000 * (n))
+#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0
+
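Editor's note: channel and event commands share the same opcode/CHID packing. A sketch of issuing one, where the opcode values themselves are driver-defined (not part of this header) and `gsi_base` remains a hypothetical iomem pointer:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: issue a channel command (a start/stop/reset-style opcode,
 * defined by the driver) for channel `chid` in EE `ee`. Completion is
 * signalled separately through the CH_CTRL interrupt sources above.
 */
static void gsi_issue_ch_cmd(void __iomem *gsi_base, unsigned int ee,
			     u32 chid, u32 opcode)
{
	u32 val;

	val = (opcode << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK;
	val |= (chid << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
		GSI_EE_n_GSI_CH_CMD_CHID_BMSK;
	writel_relaxed(val, gsi_base + GSI_EE_n_GSI_CH_CMD_OFFS(ee));
}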
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012018 + 0x4000 * (n))
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
+
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+		(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xe
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+		(GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xe
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5
+
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+			(GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xe
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4
+#define GSI_V2_7_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5
+
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+			(GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_RMSK 0xffffffff
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_INI(n) \
+			in_dword_masked(GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(n), \
+				GSI_V2_9_EE_n_GSI_HW_PARAM_2_RMSK)
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_INMI(n, mask) \
+			in_dword_masked(GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(n), \
+				mask)
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x80000000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0x1f
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x40000000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0x1e
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7f80000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+
+#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012044 + 0x4000 * (n))
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10
+#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff
+#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012080 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012088 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012090 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012094 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012098 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x1ffff
+#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x7fffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001209c + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff
+#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x000120a0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x000120a4 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x000120b0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x000120b8 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff
+#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x000120c0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012100 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012108 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012110 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012118 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012120 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012128 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012188 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001218c + 0x4000 * (n))
+
+#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012180 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012200 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012210 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00012400 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + 0x4 * (k))
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
+
+#endif /* __GSI_REG_V2_H__ */
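
As a usage note for the OFFS/BMSK/SHFT triples defined above: a field is read by masking the register word and shifting the result down. The sketch below is illustrative only and not part of the snapshot; readl_relaxed() stands in for the driver's own register accessor, and gsi_base/ee are assumed to be the mapped GSI block base and a valid EE index (bounded by the MAXn macros).

static u32 gsi_num_ch_per_ee(void __iomem *gsi_base, unsigned int ee)
{
	/* HW_PARAM_2 for this EE (v2.9 layout) */
	u32 val = readl_relaxed(gsi_base +
			GSI_V2_9_EE_n_GSI_HW_PARAM_2_OFFS(ee));

	/* mask out the field, then shift it down to bit 0 */
	return (val & GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
		GSI_V2_9_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
}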

+ 61 - 0
ipa/Makefile

@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ipam-y += \
+	ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \
+	ipa_v3/ipa.o \
+	ipa_v3/ipa_debugfs.o \
+	ipa_v3/ipa_hdr.o \
+	ipa_v3/ipa_flt.o \
+	ipa_v3/ipa_rt.o \
+	ipa_v3/ipa_dp.o \
+	ipa_v3/ipa_client.o \
+	ipa_v3/ipa_utils.o \
+	ipa_v3/ipa_nat.o \
+	ipa_v3/ipa_intf.o \
+	ipa_v3/teth_bridge.o \
+	ipa_v3/ipa_interrupts.o \
+	ipa_v3/ipa_uc.o \
+	ipa_v3/ipa_uc_wdi.o \
+	ipa_v3/ipa_dma.o \
+	ipa_v3/ipa_uc_mhi.o \
+	ipa_v3/ipa_mhi.o \
+	ipa_v3/ipa_uc_ntn.o \
+	ipa_v3/ipa_hw_stats.o \
+	ipa_v3/ipa_pm.o \
+	ipa_v3/ipa_wdi3_i.o \
+	ipa_v3/ipa_odl.o \
+	ipa_v3/ipa_wigig_i.o \
+	ipa_v3/ipahal/ipahal.o \
+	ipa_v3/ipahal/ipahal_reg.o \
+	ipa_v3/ipahal/ipahal_fltrt.o \
+	ipa_v3/ipahal/ipahal_hw_stats.o \
+	ipa_v3/ipahal/ipahal_nat.o \
+	ipa_clients/odu_bridge.o \
+	ipa_clients/ipa_mhi_client.o \
+	ipa_clients/ipa_uc_offload.o \
+	ipa_clients/ipa_wdi3.o \
+	ipa_clients/ipa_gsb.o \
+	ipa_clients/ipa_wigig.o
+
+ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
+	ipa_v3/ipa_qmi_service.o \
+	ipa_v3/rmnet_ipa_fd_ioctl.o
+
+ipam-$(CONFIG_IPA3_MHI_PROXY) += ipa_v3/ipa_mhi_proxy.o
+ipam-$(CONFIG_IPA_EMULATION) += ipa_v3/ipa_dt_replacement.o
+ipam-$(CONFIG_IPA3_REGDUMP) += ipa_v3/dump/ipa_reg_dump.o
+
+ipam-$(CONFIG_IPA_UT) += test/ipa_ut_framework.o test/ipa_test_example.o \
+	test/ipa_test_mhi.o test/ipa_test_dma.o \
+	test/ipa_test_hw_stats.o test/ipa_pm_ut.o \
+	test/ipa_test_wdi3.o
+
+obj-$(CONFIG_IPA3) += ipam.o
+
+obj-y += ipa_v3/ ipa_clients/
+
+ccflags-$(CONFIG_IPA3) += -Idrivers/platform/msm/ipa/ipa_v3
+ccflags-$(CONFIG_IPA3) += -Idrivers/platform/msm/ipa/ipa_v3/ipahal
+ccflags-$(CONFIG_IPA3) += -Idrivers/platform/msm/ipa/ipa_clients
+ccflags-$(CONFIG_IPA3_REGDUMP) += -Idrivers/platform/msm/ipa/ipa_v3/dump
+ccflags-$(CONFIG_IPA3_REGDUMP_IPA_4_5) += -Idrivers/platform/msm/ipa/ipa_v3/dump/ipa4.5

+ 3854 - 0
ipa/ipa_api.c

@@ -0,0 +1,3854 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
+#include <linux/pci.h>
+#include "ipa_api.h"
+#include "ipa_v3/ipa_i.h"
+
+/*
+ * The following is for adding code (i.e., for EMULATION) not found on x86.
+ */
+#if defined(CONFIG_IPA_EMULATION)
+# include "ipa_v3/ipa_emulation_stubs.h"
+#endif
+
+#define DRV_NAME "ipa"
+
+#define IPA_API_DISPATCH_RETURN(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = -EPERM; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				WARN(1, \
+					"%s not implemented for IPA ver %d\n", \
+						__func__, ipa_api_hw_type); \
+				ret = -EPERM; \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+		else { \
+			if (ipa_api_ctrl->api) { \
+				ipa_api_ctrl->api(p); \
+			} else { \
+				WARN(1, \
+					"%s not implemented for IPA ver %d\n",\
+						__func__, ipa_api_hw_type); \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = NULL; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				WARN(1, "%s not implemented for IPA ver %d\n",\
+						__func__, ipa_api_hw_type); \
+				ret = NULL; \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = false; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				WARN(1, "%s not implemented for IPA ver %d\n",\
+						__func__, ipa_api_hw_type); \
+				ret = false; \
+			} \
+		} \
+	} while (0)
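
Hand-expanded for one API, a wrapper built on IPA_API_DISPATCH_RETURN() behaves as in this sketch (ipa_cfg_ep is picked arbitrarily; hdl and cfg are the forwarded arguments):

/* What IPA_API_DISPATCH_RETURN(ipa_cfg_ep, hdl, cfg) boils down to: */
if (!ipa_api_ctrl) {
	/* no per-HW controller was registered at probe time */
	pr_err("%s:%d IPA HW is not supported\n", __func__, __LINE__);
	ret = -EPERM;
} else if (ipa_api_ctrl->ipa_cfg_ep) {
	/* forward to the implementation for the detected HW version */
	ret = ipa_api_ctrl->ipa_cfg_ep(hdl, cfg);
} else {
	/* controller exists but this op is absent for this HW version */
	WARN(1, "%s not implemented for IPA ver %d\n",
	     __func__, ipa_api_hw_type);
	ret = -EPERM;
}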
+
+#if defined(CONFIG_IPA_EMULATION)
+static bool running_emulation = true;
+#else
+static bool running_emulation;
+#endif
+
+static enum ipa_hw_type ipa_api_hw_type;
+static struct ipa_api_controller *ipa_api_ctrl;
+
+const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
+	__stringify(IPA_CLIENT_HSIC1_PROD),
+	__stringify(IPA_CLIENT_HSIC1_CONS),
+	__stringify(IPA_CLIENT_HSIC2_PROD),
+	__stringify(IPA_CLIENT_HSIC2_CONS),
+	__stringify(IPA_CLIENT_HSIC3_PROD),
+	__stringify(IPA_CLIENT_HSIC3_CONS),
+	__stringify(IPA_CLIENT_HSIC4_PROD),
+	__stringify(IPA_CLIENT_HSIC4_CONS),
+	__stringify(IPA_CLIENT_HSIC5_PROD),
+	__stringify(IPA_CLIENT_HSIC5_CONS),
+	__stringify(IPA_CLIENT_WLAN1_PROD),
+	__stringify(IPA_CLIENT_WLAN1_CONS),
+	__stringify(IPA_CLIENT_WLAN2_PROD),
+	__stringify(IPA_CLIENT_WLAN2_CONS),
+	__stringify(RESERVED_PROD_14),
+	__stringify(IPA_CLIENT_WLAN3_CONS),
+	__stringify(RESERVED_PROD_16),
+	__stringify(IPA_CLIENT_WLAN4_CONS),
+	__stringify(IPA_CLIENT_USB_PROD),
+	__stringify(IPA_CLIENT_USB_CONS),
+	__stringify(IPA_CLIENT_USB2_PROD),
+	__stringify(IPA_CLIENT_USB2_CONS),
+	__stringify(IPA_CLIENT_USB3_PROD),
+	__stringify(IPA_CLIENT_USB3_CONS),
+	__stringify(IPA_CLIENT_USB4_PROD),
+	__stringify(IPA_CLIENT_USB4_CONS),
+	__stringify(IPA_CLIENT_UC_USB_PROD),
+	__stringify(IPA_CLIENT_USB_DPL_CONS),
+	__stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
+	__stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+	__stringify(IPA_CLIENT_A2_TETHERED_PROD),
+	__stringify(IPA_CLIENT_A2_TETHERED_CONS),
+	__stringify(IPA_CLIENT_APPS_LAN_PROD),
+	__stringify(IPA_CLIENT_APPS_LAN_CONS),
+	__stringify(IPA_CLIENT_APPS_WAN_PROD),
+	__stringify(IPA_CLIENT_APPS_WAN_CONS),
+	__stringify(IPA_CLIENT_APPS_CMD_PROD),
+	__stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+	__stringify(IPA_CLIENT_ODU_PROD),
+	__stringify(IPA_CLIENT_ODU_EMB_CONS),
+	__stringify(RESERVED_PROD_40),
+	__stringify(IPA_CLIENT_ODU_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_PROD),
+	__stringify(IPA_CLIENT_MHI_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
+	__stringify(IPA_CLIENT_ETHERNET_PROD),
+	__stringify(IPA_CLIENT_ETHERNET_CONS),
+	__stringify(IPA_CLIENT_Q6_LAN_PROD),
+	__stringify(IPA_CLIENT_Q6_LAN_CONS),
+	__stringify(IPA_CLIENT_Q6_WAN_PROD),
+	__stringify(IPA_CLIENT_Q6_WAN_CONS),
+	__stringify(IPA_CLIENT_Q6_CMD_PROD),
+	__stringify(IPA_CLIENT_Q6_DUN_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+	__stringify(RESERVED_PROD_60),
+	__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+	__stringify(IPA_CLIENT_TEST_PROD),
+	__stringify(IPA_CLIENT_TEST_CONS),
+	__stringify(IPA_CLIENT_TEST1_PROD),
+	__stringify(IPA_CLIENT_TEST1_CONS),
+	__stringify(IPA_CLIENT_TEST2_PROD),
+	__stringify(IPA_CLIENT_TEST2_CONS),
+	__stringify(IPA_CLIENT_TEST3_PROD),
+	__stringify(IPA_CLIENT_TEST3_CONS),
+	__stringify(IPA_CLIENT_TEST4_PROD),
+	__stringify(IPA_CLIENT_TEST4_CONS),
+	__stringify(RESERVED_PROD_72),
+	__stringify(IPA_CLIENT_DUMMY_CONS),
+	__stringify(IPA_CLIENT_Q6_DL_NLO_DATA_PROD),
+	__stringify(IPA_CLIENT_Q6_UL_NLO_DATA_CONS),
+	__stringify(RESERVED_PROD_76),
+	__stringify(IPA_CLIENT_Q6_UL_NLO_ACK_CONS),
+	__stringify(RESERVED_PROD_78),
+	__stringify(IPA_CLIENT_Q6_QBAP_STATUS_CONS),
+	__stringify(RESERVED_PROD_80),
+	__stringify(IPA_CLIENT_MHI_DPL_CONS),
+	__stringify(RESERVED_PROD_82),
+	__stringify(IPA_CLIENT_ODL_DPL_CONS),
+	__stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD),
+	__stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS),
+	__stringify(IPA_CLIENT_WIGIG_PROD),
+	__stringify(IPA_CLIENT_WIGIG1_CONS),
+	__stringify(RESERVED_PROD_88),
+	__stringify(IPA_CLIENT_WIGIG2_CONS),
+	__stringify(RESERVED_PROD_90),
+	__stringify(IPA_CLIENT_WIGIG3_CONS),
+	__stringify(RESERVED_PROD_92),
+	__stringify(IPA_CLIENT_WIGIG4_CONS),
+	__stringify(RESERVED_PROD_94),
+	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
+	__stringify(IPA_CLIENT_AQC_ETHERNET_PROD),
+	__stringify(IPA_CLIENT_AQC_ETHERNET_CONS),
+};
+
+/**
+ * ipa_write_64() - convert 64 bit value to byte array
+ * @w: 64 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the written value
+ */
+u8 *ipa_write_64(u64 w, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("%s: NULL address\n", __func__);
+		return dest;
+	}
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+	*dest++ = (u8)((w >> 32) & 0xFF);
+	*dest++ = (u8)((w >> 40) & 0xFF);
+	*dest++ = (u8)((w >> 48) & 0xFF);
+	*dest++ = (u8)((w >> 56) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the written value
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("%s: NULL address\n", __func__);
+		return dest;
+	}
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the written value
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("%s: NULL address\n", __func__);
+		return dest;
+	}
+	*dest++ = (u8)((hw) & 0xFF);
+	*dest++ = (u8)((hw >> 8) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the written value
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		WARN(1, "%s: NULL address\n", __func__);
+		return dest;
+	}
+	*dest++ = (b) & 0xFF;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_64() - pad byte array to 64 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to the 64-bit-aligned position after the padding
+ */
+u8 *ipa_pad_to_64(u8 *dest)
+{
+	int i;
+	int j;
+
+	if (unlikely(dest == NULL)) {
+		WARN(1, "%s: NULL address\n", __func__);
+		return dest;
+	}
+
+	i = (long)dest & 0x7;
+
+	if (i)
+		for (j = 0; j < (8 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to the 32-bit-aligned position after the padding
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+	int i;
+	int j;
+
+	if (unlikely(dest == NULL)) {
+		WARN(1, "%s: NULL address\n", __func__);
+		return dest;
+	}
+
+	i = (long)dest & 0x3;
+
+	if (i)
+		for (j = 0; j < (4 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
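
Taken together, the write/pad helpers above serialize fields little-endian and keep the write cursor aligned. A minimal sketch, assuming an 8-byte-aligned buffer (sizes and values are made up; padding is computed from the pointer address, hence the explicit alignment):

	u8 buf[32] __aligned(8) = { 0 };
	u8 *wp = buf;

	wp = ipa_write_32(0xdeadbeef, wp);	/* bytes 0..3, little-endian */
	wp = ipa_write_16(0x1234, wp);		/* bytes 4..5 */
	wp = ipa_pad_to_64(wp);			/* zero-fills bytes 6..7 */
	/* wp now points at buf + 8, a 64-bit-aligned position */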
+
+int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr,
+	struct sg_table *in_sgt_ptr)
+{
+	unsigned int nents;
+
+	if (in_sgt_ptr != NULL) {
+		*out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+		if (*out_ch_ptr == NULL)
+			return -ENOMEM;
+
+		nents = in_sgt_ptr->nents;
+
+		(*out_ch_ptr)->sgl =
+			kcalloc(nents, sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if ((*out_ch_ptr)->sgl == NULL) {
+			kfree(*out_ch_ptr);
+			*out_ch_ptr = NULL;
+			return -ENOMEM;
+		}
+
+		memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl,
+			nents * sizeof(struct scatterlist));
+		(*out_ch_ptr)->nents = nents;
+		(*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ipa_smmu_store_sgt);
+
+int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr)
+{
+	if (*out_sgt_ptr != NULL) {
+		kfree((*out_sgt_ptr)->sgl);
+		(*out_sgt_ptr)->sgl = NULL;
+		kfree(*out_sgt_ptr);
+		*out_sgt_ptr = NULL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ipa_smmu_free_sgt);
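
A typical pairing of the two sg_table helpers, sketched as a hypothetical caller (the function name and flow are illustrative, not from the snapshot):

static int my_cache_sgt(struct sg_table *in_sgt, struct sg_table **cached)
{
	int rc;

	/* shallow-copies in_sgt's scatterlist array into a fresh table */
	rc = ipa_smmu_store_sgt(cached, in_sgt);
	if (rc)
		return rc;

	/* ... use *cached for SMMU mapping ... */

	return ipa_smmu_free_sgt(cached);	/* frees and NULLs *cached */
}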
+
+/**
+ * ipa_clear_endpoint_delay() - Clear ep_delay.
+ * @clnt_hdl:	[in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:		Should not be called from atomic context
+ */
+int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_clear_endpoint_delay, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_clear_endpoint_delay);
+
+/**
+ * ipa_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl:	[in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:		Should not be called from atomic context
+ */
+int ipa_reset_endpoint(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_endpoint, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_endpoint);
+
+/**
+ * ipa_disable_endpoint() - Disable an endpoint from IPA perspective
+ * @clnt_hdl:	[in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:		Should not be called from atomic context
+ */
+int ipa_disable_endpoint(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_endpoint);
+
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep, clnt_hdl, ipa_ep_cfg);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_nat:	[in] IPA NAT end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_nat, clnt_hdl, ep_nat);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_conn_track() - IPA end-point IPv6CT configuration
+ * @clnt_hdl:		[in] opaque client handle assigned by IPA to client
+ * @ep_conn_track:	[in] IPA IPv6CT end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_conn_track(u32 clnt_hdl,
+	const struct ipa_ep_cfg_conn_track *ep_conn_track)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_conn_track, clnt_hdl,
+		ep_conn_track);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_conn_track);
+
+/**
+ * ipa_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr:	[in] IPA end-point header configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr, clnt_hdl, ep_hdr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr_ext, clnt_hdl, ep_hdr_ext);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr_ext);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_mode:	[in] IPA end-point mode configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_mode, clnt_hdl, ep_mode);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_aggr:	[in] IPA end-point aggregation configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_aggr, clnt_hdl, ep_aggr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_deaggr, clnt_hdl, ep_deaggr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_deaggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_route:	[in] IPA end-point routing configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_route, clnt_hdl, ep_route);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_holb:	[in] IPA end-point HOLB configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb, clnt_hdl, ep_holb);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb);
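
For illustration, enabling HOLB drop on a consumer pipe might look as follows; the en/tmr_val fields of struct ipa_ep_cfg_holb are assumed from the msm ipa headers, and the timer value is a placeholder:

static int my_enable_holb(u32 clnt_hdl)
{
	struct ipa_ep_cfg_holb holb_cfg;

	memset(&holb_cfg, 0, sizeof(holb_cfg));
	holb_cfg.en = 1;	/* let IPA HW drop instead of blocking */
	holb_cfg.tmr_val = 0;	/* 0: drop immediately once the pipe is full */

	return ipa_cfg_ep_holb(clnt_hdl, &holb_cfg);
}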
+
+
+/**
+ * ipa_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @cfg:	[in] IPA end-point cfg configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_cfg, clnt_hdl, cfg);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_cfg);
+
+/**
+ * ipa_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @metadata_mask:	[in] IPA end-point metadata mask configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_metadata_mask, clnt_hdl,
+			metadata_mask);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_metadata_mask);
+
+/**
+ * ipa_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa_cfg_ep_holb() with client name instead of
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client:	[in] client name
+ * @ep_holb:	[in] IPA end-point HOLB configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb_by_client, client, ep_holb);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
+
+/**
+ * ipa_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_ctrl:	[in] IPA end-point control configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_ctrl, clnt_hdl, ep_ctrl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_ctrl);
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_hdr, hdrs);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
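
A minimal caller sketch for adding one header and committing it in the same call; the header name, the length, and the ipa_hdr_add field names are assumptions based on the msm ipa uapi layout:

static int my_add_eth_hdr(void)
{
	struct ipa_ioc_add_hdr *hdrs;
	int rc;

	hdrs = kzalloc(sizeof(*hdrs) + sizeof(struct ipa_hdr_add), GFP_KERNEL);
	if (!hdrs)
		return -ENOMEM;

	hdrs->commit = 1;		/* push to HW, not just the SW copy */
	hdrs->num_hdrs = 1;
	strlcpy(hdrs->hdr[0].name, "my_eth_hdr", sizeof(hdrs->hdr[0].name));
	hdrs->hdr[0].hdr_len = ETH_HLEN;	/* 14-byte Ethernet II header */
	/* hdrs->hdr[0].hdr[] would be filled with the raw header bytes here */

	rc = ipa_add_hdr(hdrs);
	if (!rc)
		pr_debug("hdr handle 0x%x\n", hdrs->hdr[0].hdr_hdl);
	kfree(hdrs);
	return rc;
}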
+
+/**
+ * ipa_add_hdr_usr() - add the specified headers to SW and optionally
+ * commit them to IPA HW
+ * @hdrs:		[inout] set of headers to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_hdr_usr, hdrs, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr_usr);
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally
+ * commit them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_hdr, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_hdr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * @user_only:	[in] indicate delete rules installed by userspace
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_hdr(bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_hdr, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * Look up the specified header resource and return its handle if it exists
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_hdr, lookup);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_put_hdr, hdr_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists; this would be called for partial headers
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_copy_hdr, copy);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
+/**
+ * ipa_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+							bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr_proc_ctx);
+
+/**
+ * ipa_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_hdr_proc_ctx, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr_proc_ctx);
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
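
A caller-side sketch: one IPv4 routing rule steering a destination address to a consumer pipe. The table name, client type, and addresses are placeholders; field names follow the msm ipa uapi headers:

static int my_add_v4_route(void)
{
	struct ipa_ioc_add_rt_rule *rt;
	int rc;

	rt = kzalloc(sizeof(*rt) + sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->commit = 1;
	rt->ip = IPA_IP_v4;
	strlcpy(rt->rt_tbl_name, "my_rt_tbl", sizeof(rt->rt_tbl_name));
	rt->num_rules = 1;
	rt->rules[0].rule.dst = IPA_CLIENT_USB_CONS;	/* route to USB consumer */
	rt->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_ADDR;
	rt->rules[0].rule.attrib.u.v4.dst_addr = 0xc0a80101;	/* 192.168.1.1 */
	rt->rules[0].rule.attrib.u.v4.dst_addr_mask = 0xffffffff;

	rc = ipa_add_rt_rule(rt);
	kfree(rt);
	return rc;
}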
+
+/**
+ * ipa_add_rt_rule_v2() - Add the specified routing rules to SW
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_v2, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_v2);
+
+/**
+ * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr, rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_usr);
+
+/**
+ * ipa_add_rt_rule_usr_v2() - Add the specified routing rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr_v2, rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule_usr_v2);
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_rt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_rt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ * @user_only:	[in] indicate delete rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
+ * exists, if lookup succeeds the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_rt_tbl, lookup);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_put_rt_tbl, rt_tbl_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
+
+/**
+ * ipa_query_rt_index() - find the index of the routing table
+ *			whose name and IP type are given as parameters
+ * @in:	[inout] routing table lookup params; the index is returned in it
+ *
+ * Returns:	0 on success, negative on failure (e.g. if the table
+ * doesn't exist)
+ */
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_query_rt_index, in);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_query_rt_index);
+
+/**
+ * ipa_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_rt_rule);
+
+/**
+ * ipa_mdfy_rt_rule_v2() - Modify the specified routing rules in
+ * SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule_v2, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_rt_rule_v2);
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
+
+/**
+ * ipa_add_flt_rule_v2() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_v2, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_v2);
+
+/**
+ * ipa_add_flt_rule_usr() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:		[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr, rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_usr);
+
+/**
+ * ipa_add_flt_rule_usr_v2() - Add the specified filtering rules
+ * to SW and optionally commit to IPA HW
+ * @rules:		[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2 *rules,
+	bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr_v2,
+		rules, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule_usr_v2);
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_flt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_flt_rule);
+
+/**
+ * ipa_mdfy_flt_rule_v2() - Modify the specified filtering rules
+ * in SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule_v2, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_flt_rule_v2);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_flt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:			[in] the family of filtering tables
+ * @user_only:	[in] indicate delete rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip, bool user_only)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip, user_only);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
+
+/**
+ * ipa_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_allocate_nat_device, mem);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_allocate_nat_device);
+
+/**
+ * ipa_allocate_nat_table() - Allocates memory for the NAT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by NAT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_allocate_nat_table, table_alloc);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_allocate_nat_table);
+
+
+/**
+ * ipa_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by IPv6CT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_allocate_ipv6ct_table(
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_allocate_ipv6ct_table, table_alloc);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_allocate_ipv6ct_table);
+
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_init_cmd, init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_init_cmd);
+
+/**
+ * ipa_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by IPv6CT client driver to post IP_V6_CONN_TRACK_INIT command
+ * to IPA HW.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ipv6ct_init_cmd, init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_ipv6ct_init_cmd);
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_dma_cmd, dma);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_dma_cmd);
+
+/**
+ * ipa_table_dma_cmd() - Post TABLE_DMA command to IPA HW
+ * @dma:	[in] initialization command attributes
+ *
+ * Called by NAT/IPv6CT client to post TABLE_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_table_dma_cmd, dma);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_table_dma_cmd);
+
+/**
+ * ipa_nat_del_cmd() - Delete the NAT table
+ * @del:	[in] delete NAT table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_del_cmd, del);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_del_cmd);
+
+/**
+ * ipa_del_nat_table() - Delete the NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client to delete the table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_nat_table, del);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_nat_table);
+
+/**
+ * ipa_del_ipv6ct_table() - Delete the IPv6CT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by IPv6CT client to delete the table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_ipv6ct_table, del);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_ipv6ct_table);
+
+/**
+ * ipa_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+ * @mdfy_pdn:	[in] PDN info to be written to SRAM
+ *
+ * Called by NAT client driver to modify an entry in the PDN config table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_mdfy_pdn, mdfy_pdn);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_mdfy_pdn);
+
+/**
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_send_msg, meta, buff, callback);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_send_msg);
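
A sketch of the ownership contract described above: the payload is handed off on success and the free callback reclaims it after user space reads the message; on failure the caller still owns the buffer. The event code and wrapper are hypothetical:

static void my_msg_free_cb(void *buff, u32 len, u32 type)
{
	kfree(buff);	/* IPA calls this once user space consumed it */
}

static int my_send_event(const void *data, u16 len, u8 msg_type)
{
	struct ipa_msg_meta meta;
	void *payload;
	int rc;

	payload = kmemdup(data, len, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	memset(&meta, 0, sizeof(meta));
	meta.msg_type = msg_type;	/* e.g. a client event code */
	meta.msg_len = len;

	rc = ipa_send_msg(&meta, payload, my_msg_free_cb);
	if (rc)
		kfree(payload);		/* not handed off on failure */
	return rc;
}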
+
+/**
+ * ipa_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_pull_msg, meta, callback);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_pull_msg);
+
+/**
+ * ipa_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_deregister_pull_msg, meta);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_pull_msg);
+
+/**
+ * ipa_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_intf, name, tx, rx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf);
+
+/**
+ * ipa_register_intf_ext() - register "logical" interface which has only
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+	const struct ipa_rx_intf *rx,
+	const struct ipa_ext_intf *ext)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_intf_ext, name, tx, rx, ext);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf_ext);
+
+/**
+ * ipa_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_deregister_intf(const char *name)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_deregister_intf, name);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_intf);
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_aggr_mode, mode);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_qcncm_ndp_sig, sig);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_single_ndp_per_mbim, enable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet meta-data
+ *
+ * Data-path tx handler. This is used both for the SW data-path, which bypasses
+ * most IPA HW blocks, and for the regular HW data-path (WLAN AMPDU traffic
+ * only). If dst is a "valid" CONS type, the SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, the HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver will free the skb internally.
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tx_dp, dst, skb, meta);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
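
A caller-side sketch of the error-path ownership rule spelled out above (the client type and wrapper are placeholders):

static int my_client_xmit(struct sk_buff *skb)
{
	int rc;

	/* NULL meta: no per-packet TX metadata is supplied */
	rc = ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);
	if (rc)
		dev_kfree_skb_any(skb);	/* error path: caller still owns skb */

	return rc;
}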
+
+/**
+ * ipa_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to IPA HW.
+ *
+ * The function will send data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one, setting the EOT flag for the
+ * last descriptor. Once the send is done from the SPS point of
+ * view, the IPA driver will get notified by the supplied callback -
+ * ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tx_dp_mul, src, data_desc);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp_mul);
+
+void ipa_free_skb(struct ipa_rx_data *data)
+{
+	IPA_API_DISPATCH(ipa_free_skb, data);
+}
+EXPORT_SYMBOL(ipa_free_skb);
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-BAM connection with IPA.
+ *  - allocate a descriptor FIFO
+ *  - register a callback function (ipa_sps_irq_rx_notify or
+ *    ipa_sps_irq_tx_notify, depending on the client type) in case the
+ *    driver is not configured for polling mode
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_setup_sys_pipe, sys_in, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
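+
+/*
+ * Example (illustrative sketch only): setting up and later tearing down a
+ * system pipe. The field values, my_rx_notify_cb and my_ctx are
+ * hypothetical, not a recommended configuration.
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	sys_in.notify = my_rx_notify_cb;
+ *	sys_in.priv = my_ctx;
+ *	if (ipa_setup_sys_pipe(&sys_in, &hdl))
+ *		return -EFAULT;
+ *	...
+ *	ipa_teardown_sys_pipe(hdl);
+ */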
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_teardown_sys_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+int ipa_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_bam_or_gsi_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_setup, sys_in, ipa_bam_or_gsi_hdl,
+			ipa_pipe_num, clnt_hdl, en_status);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_setup);
+
+int ipa_sys_teardown(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_teardown, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_teardown);
+
+int ipa_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_update_gsi_hdls, clnt_hdl,
+		gsi_ch_hdl, gsi_ev_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_update_gsi_hdls);
+
+/**
+ * ipa_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_connect_wdi_pipe, in, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_connect_wdi_pipe);
+
+/**
+ * ipa_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconnect_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_wdi_pipe);
+
+/**
+ * ipa_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_enable_wdi_pipe);
+
+/**
+ * ipa_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_wdi_pipe);
+
+/**
+ * ipa_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_resume_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_resume_wdi_pipe);
+
+/**
+ * ipa_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_wdi_pipe);
+
+/**
+ * ipa_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_wdi_stats, stats);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_wdi_stats);
+
+/**
+ * ipa_uc_bw_monitor() - start uc bw monitoring
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_bw_monitor, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_bw_monitor);
+
+/**
+ * ipa_set_wlan_tx_info() - set WLAN TX information for the uC
+ * @info:	[inout] set info populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_wlan_tx_info, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_wlan_tx_info);
+
+/**
+ * ipa_get_smem_restr_bytes() - Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa_get_smem_restr_bytes(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_smem_restr_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
+
+/**
+ * ipa_broadcast_wdi_quota_reach_ind() - broadcast quota-reached indication
+ * @fid:	[in] input netdev ID
+ * @num_bytes:	[in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind,
+		fid, num_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
+
+/**
+ * ipa_uc_wdi_get_dbpa() - Retrieve the doorbell
+ * physical address of the WLAN pipes
+ * @param:  [in/out] input/output parameters
+ *          from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_wdi_get_dbpa, param);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa);
+
+/**
+ * ipa_uc_reg_rdyCB() - Register a uC ready CB
+ * if the uC is not ready; WDI only.
+ * @inout:	[in/out] input/output parameters
+ * from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_reg_rdyCB, inout);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
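+
+/*
+ * Example (illustrative sketch only): registering for uC readiness;
+ * my_uc_ready_cb and my_ctx are hypothetical. If the uC is already up,
+ * is_uC_ready is expected to come back set and no callback will fire.
+ *
+ *	struct ipa_wdi_uc_ready_params param;
+ *
+ *	param.notify = my_uc_ready_cb;
+ *	param.priv = my_ctx;
+ *	if (ipa_uc_reg_rdyCB(&param))
+ *		return -EFAULT;
+ *	if (param.is_uC_ready)
+ *		my_uc_ready_cb(my_ctx);
+ */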
+
+/**
+ * ipa_wigig_internal_init() - Get the uC doorbell and register a uC
+ * ready CB if the uC is not ready; WIGIG only.
+ * @inout:	[in/out] uc ready input/output parameters
+ * from/to client
+ * @int_notify: [in] wigig misc interrupt handler function
+ * @uc_db_pa: [out] uC db physical address
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_wigig_internal_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_wigig_internal_init, inout,
+		int_notify, uc_db_pa);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_internal_init);
+
+/**
+ * ipa_uc_dereg_rdyCB() - De-register the uC ready CB
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_dereg_rdyCB(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
+
+/**
+ * teth_bridge_init() - Initialize the Tethering bridge driver
+ * @params: in/out params for the USB initialization API (see the struct
+ *  definition for more info)
+ *
+ * The USB driver gets a pointer to a callback function (usb_notify_cb) and
+ * associated data. The USB driver installs this callback function in the
+ * call to ipa_connect().
+ *
+ * Builds IPA resource manager dependency graph.
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_init, params);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_init);
+
+/**
+ * teth_bridge_disconnect() - Disconnect the tethering bridge module
+ * @client: IPA client type
+ */
+int teth_bridge_disconnect(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_disconnect, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_disconnect);
+
+/**
+ * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+ * @connect_params:	Connection info
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid parameters
+ *		-EPERM: Operation not permitted as the bridge is already
+ *		connected
+ */
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_connect, connect_params);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_connect);
+
+/**
+ * ipa_set_client() - provide client mapping
+ * @index: pipe index to set the mapping for
+ * @client: client type
+ * @uplink: whether the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	IPA_API_DISPATCH(ipa_set_client, index, client, uplink);
+}
+EXPORT_SYMBOL(ipa_set_client);
+
+/**
+ * ipa_get_client() - provide client mapping
+ * @pipe_idx: pipe index
+ *
+ * Return value: client type mapped to the pipe
+ */
+enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client);
+
+/**
+ * ipa_get_client_uplink() - provide the client uplink state
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the client mapped to the pipe is uplink
+ */
+bool ipa_get_client_uplink(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client_uplink, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_uplink);
+
+/**
+ * ipa_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the DMA
+ * pipes:
+ *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-ENOMEM: allocating memory error
+ *		-EPERM: pipe connection failed
+ */
+int ipa_dma_init(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_init);
+
+/**
+ * ipa_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *		 enabled
+ */
+int ipa_dma_enable(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_enable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_enable);
+
+/**
+ * ipa_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: cannot disable ipa_dma as there are pending
+ *			memcpy works
+ */
+int ipa_dma_disable(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_disable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_disable);
+
+/**
+ * ipa_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on SPS failures
+ *		-EFAULT: other
+ */
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_sync_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_sync_memcpy);
+
+/**
+ * ipa_dma_async_memcpy() - Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy is done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on SPS failures
+ *		-EFAULT: descriptor FIFO is full.
+ */
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_async_memcpy, dest, src, len, user_cb,
+		user_param);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_async_memcpy);
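+
+/*
+ * Example (illustrative sketch only): the expected IPADMA call sequence
+ * given the return codes documented above; dest_pa and src_pa are assumed
+ * to be valid physical addresses owned by the caller.
+ *
+ *	if (ipa_dma_init())
+ *		return -EFAULT;
+ *	if (ipa_dma_enable())
+ *		return -EFAULT;
+ *	if (ipa_dma_sync_memcpy(dest_pa, src_pa, len))
+ *		return -EFAULT;
+ *	ipa_dma_disable();
+ *	ipa_dma_destroy();
+ */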
+
+/**
+ * ipa_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_uc_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_uc_memcpy);
+
+/**
+ * ipa_dma_destroy() - Tear down IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa_dma_destroy(void)
+{
+	IPA_API_DISPATCH(ipa_dma_destroy);
+}
+EXPORT_SYMBOL(ipa_dma_destroy);
+
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mhi_init_engine);
+
+/**
+ * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel start,
+ * after the MHI engine has been started. It does the following:
+ *	- Send command to uC to start corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_connect_mhi_pipe);
+
+/**
+ * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: [in] client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel reset,
+ * after the MHI channel has been started. It does the following:
+ *	- Send command to uC to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_mhi_pipe);
+
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client);
+
+	return ret;
+}
+
+int ipa_uc_mhi_reset_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle);
+
+	return ret;
+}
+
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client);
+
+	return ret;
+}
+
+int ipa_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req);
+
+	return ret;
+}
+
+int ipa_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req);
+
+	return ret;
+}
+
+int ipa_generate_tag_process(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_generate_tag_process);
+
+	return ret;
+}
+
+int ipa_disable_sps_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client);
+
+	return ret;
+}
+
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client);
+
+	return ret;
+}
+
+int ipa_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client);
+
+	return ret;
+}
+
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb);
+}
+
+void ipa_set_tag_process_before_gating(bool val)
+{
+	IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val);
+}
+
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info);
+
+	return ret;
+}
+
+int ipa_uc_mhi_suspend_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle);
+
+	return ret;
+}
+
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel,
+			channelHandle);
+
+	return ret;
+}
+
+bool ipa_has_open_aggr_frame(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client);
+
+	return ret;
+}
+
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client,
+			LPTransitionRejected, brstmode_enabled, ch_scratch,
+			index);
+
+	return ret;
+}
+
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info,
+			cmd);
+
+	return ret;
+}
+
+int ipa_mhi_destroy_channel(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client);
+
+	return ret;
+}
+
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+		void (*wakeup_request_cb)(void))
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb);
+
+	return ret;
+}
+
+void ipa_uc_mhi_cleanup(void)
+{
+	IPA_API_DISPATCH(ipa_uc_mhi_cleanup);
+}
+
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size);
+
+	return ret;
+}
+
+/**
+ * ipa_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa_uc_state_check(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_state_check);
+
+	return ret;
+}
+
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_write_qmap_id, param_in);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_write_qmap_id);
+
+/**
+ * ipa_add_interrupt_handler() - Adds handler to an interrupt type
+ * @interrupt:		Interrupt type
+ * @handler:		The handler to be added
+ * @deferred_flag:	whether the handler processing should be deferred in
+ *			a workqueue
+ * @private_data:	the client's private data
+ *
+ * Adds a handler for an interrupt type and enables the corresponding bit
+ * in the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+ * will be enabled.
+ */
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+	ipa_irq_handler_t handler,
+	bool deferred_flag,
+	void *private_data)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_interrupt_handler, interrupt, handler,
+		deferred_flag, private_data);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_interrupt_handler);
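+
+/*
+ * Example (illustrative sketch only): deferring TX_SUSPEND interrupt
+ * handling to a workqueue; my_suspend_handler and my_priv are
+ * hypothetical, and the handler signature follows ipa_irq_handler_t.
+ *
+ *	static void my_suspend_handler(enum ipa_irq_type interrupt,
+ *		void *private_data, void *interrupt_data)
+ *	{
+ *		...
+ *	}
+ *
+ *	ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_handler,
+ *		true, my_priv);
+ */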
+
+/**
+ * ipa_remove_interrupt_handler() - Removes the handler for an interrupt type
+ * @interrupt:		Interrupt type
+ *
+ * Removes the handler and disables the corresponding bit in the IRQ_EN
+ * register.
+ */
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_remove_interrupt_handler, interrupt);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_remove_interrupt_handler);
+
+/**
+ * ipa_restore_suspend_handler() - restores the original suspend IRQ handler
+ * as it was registered in the IPA init sequence.
+ * Return codes:
+ * 0: success
+ * -EPERM: failed to remove current handler or failed to add original handler
+ */
+int ipa_restore_suspend_handler(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_restore_suspend_handler);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_restore_suspend_handler);
+
+/**
+ * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * The function is rate-limited to avoid flooding the kernel log buffer.
+ */
+void ipa_bam_reg_dump(void)
+{
+	IPA_API_DISPATCH(ipa_bam_reg_dump);
+}
+EXPORT_SYMBOL(ipa_bam_reg_dump);
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_ep_mapping, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ep_mapping);
+
+/**
+ * ipa_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_ready(void)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_ready)
+		return false;
+	return ipa_api_ctrl->ipa_is_ready();
+}
+EXPORT_SYMBOL(ipa_is_ready);
+
+/**
+ * ipa_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_vote(void)
+{
+	IPA_API_DISPATCH(ipa_proxy_clk_vote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_vote);
+
+/**
+ * ipa_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_unvote(void)
+{
+	IPA_API_DISPATCH(ipa_proxy_clk_unvote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_unvote);
+
+/**
+ * ipa_get_hw_type() - Return IPA HW version
+ *
+ * Return value: enum ipa_hw_type
+ */
+enum ipa_hw_type ipa_get_hw_type(void)
+{
+	return ipa_api_hw_type;
+}
+EXPORT_SYMBOL(ipa_get_hw_type);
+
+/**
+ * ipa_is_client_handle_valid() - check if the IPA client handle is valid
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_client_handle_valid)
+		return false;
+	return ipa_api_ctrl->ipa_is_client_handle_valid(clnt_hdl);
+}
+EXPORT_SYMBOL(ipa_is_client_handle_valid);
+
+/**
+ * ipa_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client_mapping, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_mapping);
+
+/**
+ * ipa_get_rm_resource_from_ep() - part of the deprecated RM mechanism;
+ * still used by some drivers, so the definition is kept.
+ */
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx)
+{
+	IPAERR("IPA RM is not supported idx=%d\n", pipe_idx);
+	return -EFAULT;
+}
+EXPORT_SYMBOL(ipa_get_rm_resource_from_ep);
+
+/**
+ * ipa_get_modem_cfg_emb_pipe_flt() - Return ipa_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt)
+		return false;
+	return ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt();
+}
+EXPORT_SYMBOL(ipa_get_modem_cfg_emb_pipe_flt);
+
+/**
+ * ipa_get_transport_type() - Return ipa_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa_get_transport_type(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_transport_type);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_transport_type);
+
+/**
+ * ipa_get_smmu_domain() - Return the SMMU domain
+ *
+ * Return value: pointer to iommu domain if smmu_cb valid, NULL otherwise
+ */
+struct iommu_domain *ipa_get_smmu_domain(void)
+{
+	struct iommu_domain *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_smmu_domain);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_smmu_domain);
+
+/**
+ * ipa_disable_apps_wan_cons_deaggr() - set
+ * ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_apps_wan_cons_deaggr, agg_size,
+		agg_count);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr);
+
+/**
+ * ipa_get_dma_dev() - Return the ipa_ctx DMA device pointer
+ *
+ * Return value: pointer to the ipa_ctx DMA device
+ */
+struct device *ipa_get_dma_dev(void)
+{
+	struct device *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_dma_dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_dma_dev);
+
+/**
+ * ipa_release_wdi_mapping() - release iommu mapping
+ * @num_buffers: number of buffers to be released
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_release_wdi_mapping, num_buffers, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_release_wdi_mapping);
+
+/**
+ * ipa_create_wdi_mapping() - Perform iommu mapping
+ * @num_buffers: number of buffers to be mapped
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_create_wdi_mapping, num_buffers, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_create_wdi_mapping);
+
+/**
+ * ipa_get_gsi_ep_info() - provide gsi ep information
+ * @client: IPA client type
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(enum ipa_client_type client)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info)
+		return NULL;
+	return ipa_api_ctrl->ipa_get_gsi_ep_info(client);
+}
+EXPORT_SYMBOL(ipa_get_gsi_ep_info);
+
+/**
+ * ipa_stop_gsi_channel() - Stops a GSI channel in IPA
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_stop_gsi_channel, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_stop_gsi_channel);
+
+/**
+ * ipa_start_gsi_channel() - Starts a GSI channel in IPA
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_start_gsi_channel(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_start_gsi_channel);
+
+/**
+ * ipa_is_vlan_mode() - check if a LAN driver should load in VLAN mode
+ * @iface: type of VLAN-capable device
+ * @res: query result: true for VLAN mode, false for non-VLAN mode
+ *
+ * API must be called after ipa_is_ready() returns true, otherwise it will fail
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_is_vlan_mode, iface, res);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_is_vlan_mode);
+
+/**
+ * ipa_get_version_string() - Get string representation of IPA version
+ * @ver: IPA version
+ *
+ * Return: Constant string representation
+ */
+const char *ipa_get_version_string(enum ipa_hw_type ver)
+{
+	const char *str;
+
+	switch (ver) {
+	case IPA_HW_v1_0:
+		str = "1.0";
+		break;
+	case IPA_HW_v1_1:
+		str = "1.1";
+		break;
+	case IPA_HW_v2_0:
+		str = "2.0";
+		break;
+	case IPA_HW_v2_1:
+		str = "2.1";
+		break;
+	case IPA_HW_v2_5:
+		str = "2.5/2.6";
+		break;
+	case IPA_HW_v2_6L:
+		str = "2.6L";
+		break;
+	case IPA_HW_v3_0:
+		str = "3.0";
+		break;
+	case IPA_HW_v3_1:
+		str = "3.1";
+		break;
+	case IPA_HW_v3_5:
+		str = "3.5";
+		break;
+	case IPA_HW_v3_5_1:
+		str = "3.5.1";
+		break;
+	case IPA_HW_v4_0:
+		str = "4.0";
+		break;
+	case IPA_HW_v4_1:
+		str = "4.1";
+		break;
+	case IPA_HW_v4_2:
+		str = "4.2";
+		break;
+	case IPA_HW_v4_5:
+		str = "4.5";
+		break;
+	case IPA_HW_v4_7:
+		str = "4.7";
+		break;
+	case IPA_HW_v4_9:
+		str = "4.9";
+		break;
+	default:
+		str = "Invalid version";
+		break;
+	}
+
+	return str;
+}
+EXPORT_SYMBOL(ipa_get_version_string);
+
+static const struct of_device_id ipa_plat_drv_match[] = {
+	{ .compatible = "qcom,ipa", },
+	{ .compatible = "qcom,ipa-smmu-ap-cb", },
+	{ .compatible = "qcom,ipa-smmu-wlan-cb", },
+	{ .compatible = "qcom,ipa-smmu-uc-cb", },
+	{ .compatible = "qcom,ipa-smmu-11ad-cb", },
+	{ .compatible = "qcom,smp2p-map-ipa-1-in", },
+	{ .compatible = "qcom,smp2p-map-ipa-1-out", },
+	{}
+};
+
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static const struct of_device_id ipa_pci_drv_match[] = {
+	{ .compatible = "qcom,ipa", },
+	{}
+};
+
+/*
+ * Forward declarations of static functions required for PCI
+ * registration
+ *
+ * VENDOR and DEVICE should be defined in pci_ids.h
+ */
+static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ipa_pci_remove(struct pci_dev *pdev);
+static void ipa_pci_shutdown(struct pci_dev *pdev);
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev,
+	pci_channel_state_t state);
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev);
+static void ipa_pci_io_resume(struct pci_dev *dev);
+
+#define LOCAL_VENDOR 0x17CB
+#define LOCAL_DEVICE 0x00ff
+
+static const char ipa_pci_driver_name[] = "qcipav3";
+
+static const struct pci_device_id ipa_pci_tbl[] = {
+	{ PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ipa_pci_tbl);
+
+/* PCI Error Recovery */
+static const struct pci_error_handlers ipa_pci_err_handler = {
+	.error_detected = ipa_pci_io_error_detected,
+	.slot_reset = ipa_pci_io_slot_reset,
+	.resume = ipa_pci_io_resume,
+};
+
+static struct pci_driver ipa_pci_driver = {
+	.name     = ipa_pci_driver_name,
+	.id_table = ipa_pci_tbl,
+	.probe    = ipa_pci_probe,
+	.remove   = ipa_pci_remove,
+	.shutdown = ipa_pci_shutdown,
+	.err_handler = &ipa_pci_err_handler
+};
+
+static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
+{
+	int result;
+
+	/*
+	 * The IPA probe function can be called multiple times, as the same
+	 * probe function handles multiple compatible strings.
+	 */
+	pr_debug("ipa: IPA driver probing started for %s\n",
+		pdev_p->dev.of_node->name);
+
+	if (!ipa_api_ctrl) {
+		ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+		if (!ipa_api_ctrl)
+			return -ENOMEM;
+
+		/* Get IPA HW Version */
+		result = of_property_read_u32(pdev_p->dev.of_node,
+			"qcom,ipa-hw-ver", &ipa_api_hw_type);
+		if (result || ipa_api_hw_type == 0) {
+			pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+			kfree(ipa_api_ctrl);
+			ipa_api_ctrl = NULL;
+			return -ENODEV;
+		}
+		pr_debug("ipa: ipa_api_hw_type = %d\n", ipa_api_hw_type);
+	}
+
+	/* call probe based on IPA HW version */
+	switch (ipa_api_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+	case IPA_HW_v4_0:
+	case IPA_HW_v4_1:
+	case IPA_HW_v4_2:
+	case IPA_HW_v4_5:
+	case IPA_HW_v4_7:
+	case IPA_HW_v4_9:
+		result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
+			ipa_plat_drv_match);
+		break;
+	default:
+		pr_err("ipa: unsupported version %d\n", ipa_api_hw_type);
+		return -EPERM;
+	}
+
+	if (result && result != -EPROBE_DEFER)
+		pr_err("ipa: ipa_plat_drv_probe failed\n");
+
+	return result;
+}
+
+static int ipa_ap_suspend(struct device *dev)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ap_suspend, dev);
+
+	return ret;
+}
+
+static int ipa_ap_resume(struct device *dev)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ap_resume, dev);
+
+	return ret;
+}
+
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+			      void *user_data)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_ipa_ready_cb,
+				ipa_ready_cb, user_data);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_ipa_ready_cb);
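+
+/*
+ * Example (illustrative sketch only): clients that may load before IPA
+ * typically check ipa_is_ready() once and fall back to the ready
+ * callback; my_ready_cb and my_ctx are hypothetical.
+ *
+ *	if (!ipa_is_ready())
+ *		return ipa_register_ipa_ready_cb(my_ready_cb, my_ctx);
+ *	my_ready_cb(my_ctx);
+ */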
+
+/**
+ * ipa_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_INC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks);
+
+/**
+ * ipa_dec_client_disable_clks() - Decrease active clients counter, and
+ * disable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_DEC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
+}
+EXPORT_SYMBOL(ipa_dec_client_disable_clks);
+
+/**
+ * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up the IPA HW.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ *
+ * Return codes: 0 for success
+ *		-EPERM if an asynchronous action should have been done
+ */
+int ipa_inc_client_enable_clks_no_block(
+	struct ipa_active_client_logging_info *id)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
+
+/**
+ * ipa_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_no_block);
+
+/**
+ * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_resume_resource(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_resume_resource);
+
+/**
+ * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_sync);
+
+/**
+ * ipa_set_required_perf_profile() - set IPA to the specified performance
+ *	profile based on the bandwidth, unless minimum voltage required is
+ *	higher. In this case the floor_voltage specified will be used.
+ * @floor_voltage: minimum voltage to operate
+ * @bandwidth_mbps: needed bandwidth from IPA
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+	u32 bandwidth_mbps)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage,
+		bandwidth_mbps);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_required_perf_profile);
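+
+/*
+ * Example (illustrative sketch only): requesting a nominal-voltage
+ * profile sized for roughly 1000 Mbps of IPA throughput; the bandwidth
+ * figure is an arbitrary value chosen for the sketch.
+ *
+ *	if (ipa_set_required_perf_profile(IPA_VOLTAGE_NOMINAL, 1000))
+ *		return -EFAULT;
+ */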
+
+/**
+ * ipa_get_ipc_logbuf() - return a pointer to IPA driver IPC log
+ */
+void *ipa_get_ipc_logbuf(void)
+{
+	void *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf);
+
+/**
+ * ipa_get_ipc_logbuf_low() - return a pointer to IPA driver IPC low prio log
+ */
+void *ipa_get_ipc_logbuf_low(void)
+{
+	void *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf_low);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf_low);
+
+/**
+ * ipa_assert() - general function for assertion
+ */
+void ipa_assert(void)
+{
+	pr_err("IPA: unrecoverable error has occurred, asserting\n");
+	BUG();
+}
+EXPORT_SYMBOL(ipa_assert);
+
+/**
+ * ipa_rx_poll() - Poll the RX packets from the IPA HW in
+ * softirq context
+ * @clnt_hdl: client handle
+ * @budget: number of packets to be polled in a single iteration
+ *
+ * Return codes: >= 0  : Actual number of packets polled
+ *
+ */
+int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_rx_poll);
+
+/**
+ * ipa_recycle_wan_skb() - Recycle the WAN skb
+ *
+ * @skb: skb to be recycled
+ *
+ */
+void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+	IPA_API_DISPATCH(ipa_recycle_wan_skb, skb);
+}
+EXPORT_SYMBOL(ipa_recycle_wan_skb);
+
+/**
+ * ipa_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+		notify, priv, hdr_len, outp);
+
+	return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+		ipa_ep_idx_dl, params);
+
+	return ret;
+}
+
+/**
+ * ipa_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa_get_pdev(void)
+{
+	struct device *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_pdev);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_pdev);
+
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+			      void *user_data)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB,
+				ipauc_ready_cb, user_data);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB);
+
+void ipa_ntn_uc_dereg_rdyCB(void)
+{
+	IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB);
+}
+EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB);
+
+int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
+	struct ipa_smmu_out_params *out)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_smmu_params, in, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_smmu_params);
+
+/**
+ * ipa_conn_wdi_pipes() - connect wdi pipes
+ */
+int ipa_conn_wdi_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out,
+	ipa_wdi_meter_notifier_cb wdi_notify)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_conn_wdi_pipes, in, out, wdi_notify);
+
+	return ret;
+}
+
+/**
+ * ipa_disconn_wdi_pipes() - disconnect wdi pipes
+ */
+int ipa_disconn_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconn_wdi_pipes, ipa_ep_idx_tx,
+		ipa_ep_idx_rx);
+
+	return ret;
+}
+
+/**
+ * ipa_enable_wdi_pipes() - enable wdi pipes
+ */
+int ipa_enable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipes, ipa_ep_idx_tx,
+		ipa_ep_idx_rx);
+
+	return ret;
+}
+
+/**
+ * ipa_disable_wdi_pipes() - disable wdi pipes
+ */
+int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipes, ipa_ep_idx_tx,
+		ipa_ep_idx_rx);
+
+	return ret;
+}
+
+/**
+ * ipa_wigig_uc_msi_init() - SMMU map/unmap MSI-related WIGIG HW registers
+ *	and init/deinit the uC MSI config
+ */
+int ipa_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_wigig_uc_msi_init, init,
+		periph_baddr_pa,
+		pseudo_cause_pa,
+		int_gen_tx_pa,
+		int_gen_rx_pa,
+		dma_ep_misc_pa);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_uc_msi_init);
+
+/**
+ * ipa_conn_wigig_rx_pipe_i() - connect wigig rx pipe
+ */
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_rx_pipe_i, in, out, parent);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_conn_wigig_rx_pipe_i);
+
+/**
+ * ipa_conn_wigig_client_i() - connect a wigig client
+ */
+int ipa_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_client_i, in, out,
+		tx_notify, priv);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_conn_wigig_client_i);
+
+/**
+ * ipa_disconn_wigig_pipe_i() - disconnect a wigig pipe
+ */
+int ipa_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconn_wigig_pipe_i, client,
+		pipe_smmu, dbuff);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconn_wigig_pipe_i);
+
+/**
+ * ipa_enable_wigig_pipe_i() - enable a wigig pipe
+ */
+int ipa_enable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_enable_wigig_pipe_i, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_enable_wigig_pipe_i);
+
+/**
+ * ipa_disable_wigig_pipe_i() - disable a wigig pipe
+ */
+int ipa_disable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_wigig_pipe_i, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_wigig_pipe_i);
+
+/**
+ * ipa_get_lan_rx_napi() - returns whether NAPI is enabled for LAN RX
+ */
+bool ipa_get_lan_rx_napi(void)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_get_lan_rx_napi);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_lan_rx_napi);
+
+/**
+ * ipa_tz_unlock_reg() - Allow AP access to memory regions controlled by TZ
+ */
+int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tz_unlock_reg, reg_info, num_regs);
+
+	return ret;
+}
+
+void ipa_register_client_callback(int (*client_cb)(bool is_lock),
+				bool (*teth_port_state)(void),
+					enum ipa_client_type client)
+{
+	IPA_API_DISPATCH(ipa_register_client_callback,
+		client_cb, teth_port_state, client);
+}
+EXPORT_SYMBOL(ipa_register_client_callback);
+
+void ipa_deregister_client_callback(enum ipa_client_type client)
+{
+	IPA_API_DISPATCH(ipa_deregister_client_callback,
+		client);
+}
+EXPORT_SYMBOL(ipa_deregister_client_callback);
+
+int ipa_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_debug_stats_alloc, cmdinfo);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_debug_stats_alloc);
+
+int ipa_uc_debug_stats_dealloc(uint32_t prot_id)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_debug_stats_dealloc, prot_id);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_debug_stats_dealloc);
+
+void ipa_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats)
+{
+	IPA_API_DISPATCH(ipa_get_gsi_stats,
+		prot_id, stats);
+}
+EXPORT_SYMBOL(ipa_get_gsi_stats);
+
+int ipa_get_prot_id(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_prot_id,
+		client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_prot_id);
+
+static const struct dev_pm_ops ipa_pm_ops = {
+	.suspend_noirq = ipa_ap_suspend,
+	.resume_noirq = ipa_ap_resume,
+};
+
+static struct platform_driver ipa_plat_drv = {
+	.probe = ipa_generic_plat_drv_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &ipa_pm_ops,
+		.of_match_table = ipa_plat_drv_match,
+	},
+};
+
+/*********************************************************/
+/*                PCIe Version                           */
+/*********************************************************/
+
+static int ipa_pci_probe(
+	struct pci_dev             *pci_dev,
+	const struct pci_device_id *ent)
+{
+	int result;
+
+	if (!pci_dev || !ent) {
+		pr_err(
+		    "Bad arg: pci_dev (%pK) and/or ent (%pK)\n",
+		    pci_dev, ent);
+		return -EOPNOTSUPP;
+	}
+
+	if (!ipa_api_ctrl) {
+		ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+		if (ipa_api_ctrl == NULL)
+			return -ENOMEM;
+		/* Get IPA HW Version */
+		result = of_property_read_u32(NULL,
+			"qcom,ipa-hw-ver", &ipa_api_hw_type);
+		if (result || ipa_api_hw_type == 0) {
+			pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+			kfree(ipa_api_ctrl);
+			ipa_api_ctrl = NULL;
+			return -ENODEV;
+		}
+		pr_debug("ipa: ipa_api_hw_type = %d\n", ipa_api_hw_type);
+	}
+
+	/*
+	 * Call a reduced version of platform_probe appropriate for PCIe
+	 */
+	result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match);
+
+	if (result && result != -EPROBE_DEFER)
+		pr_err("ipa: ipa3_pci_drv_probe failed\n");
+
+	return result;
+}
+
+static void ipa_pci_remove(struct pci_dev *pci_dev)
+{
+}
+
+static void ipa_pci_shutdown(struct pci_dev *pci_dev)
+{
+}
+
+static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev,
+	pci_channel_state_t state)
+{
+	return 0;
+}
+
+static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev)
+{
+	return 0;
+}
+
+static void ipa_pci_io_resume(struct pci_dev *pci_dev)
+{
+}
+
+static int __init ipa_module_init(void)
+{
+	pr_debug("IPA module init\n");
+
+	if (running_emulation) {
+		/* Register as a PCI device driver */
+		return pci_register_driver(&ipa_pci_driver);
+	}
+	/* Register as a platform device driver */
+	return platform_driver_register(&ipa_plat_drv);
+}
+subsys_initcall(ipa_module_init);
+
+static void __exit ipa_module_exit(void)
+{
+	if (running_emulation)
+		pci_unregister_driver(&ipa_pci_driver);
+	platform_driver_unregister(&ipa_plat_drv);
+}
+module_exit(ipa_module_exit);
+
+MODULE_SOFTDEP("pre: subsys-pil-tz");
+MODULE_SOFTDEP("pre: qcom-arm-smmu-mod");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");

+ 515 - 0
ipa/ipa_api.h

@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
+#include <linux/ipa_wdi3.h>
+#include "ipa_common_i.h"
+
+#ifndef _IPA_API_H_
+#define _IPA_API_H_
+
+struct ipa_api_controller {
+	int (*ipa_reset_endpoint)(u32 clnt_hdl);
+
+	int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
+
+	int (*ipa_disable_endpoint)(u32 clnt_hdl);
+
+	int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_conn_track)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_conn_track *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_hdr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_mode)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_aggr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_route)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_holb)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client,
+		const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+	int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs);
+
+	int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only);
+
+	int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls);
+
+	int (*ipa_commit_hdr)(void);
+
+	int (*ipa_reset_hdr)(bool user_only);
+
+	int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup);
+
+	int (*ipa_put_hdr)(u32 hdr_hdl);
+
+	int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy);
+
+	int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+								bool user_only);
+
+	int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+	int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
+
+	int (*ipa_add_rt_rule_v2)(struct ipa_ioc_add_rt_rule_v2 *rules);
+
+	int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules,
+							bool user_only);
+
+	int (*ipa_add_rt_rule_usr_v2)(struct ipa_ioc_add_rt_rule_v2 *rules,
+							bool user_only);
+
+	int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
+
+	int (*ipa_commit_rt)(enum ipa_ip_type ip);
+
+	int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only);
+
+	int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup);
+
+	int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl);
+
+	int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in);
+
+	int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules);
+
+	int (*ipa_mdfy_rt_rule_v2)(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
+
+	int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
+
+	int (*ipa_add_flt_rule_v2)(struct ipa_ioc_add_flt_rule_v2 *rules);
+
+	int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules,
+								bool user_only);
+
+	int (*ipa_add_flt_rule_usr_v2)
+		(struct ipa_ioc_add_flt_rule_v2 *rules, bool user_only);
+
+	int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
+
+	int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
+
+	int (*ipa_mdfy_flt_rule_v2)(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
+
+	int (*ipa_commit_flt)(enum ipa_ip_type ip);
+
+	int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only);
+
+	int (*ipa_allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
+
+	int (*ipa_allocate_nat_table)(
+		struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+
+	int (*ipa_allocate_ipv6ct_table)(
+		struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+
+	int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init);
+
+	int (*ipa_ipv6ct_init_cmd)(struct ipa_ioc_ipv6ct_init *init);
+
+	int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+
+	int (*ipa_table_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+
+	int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del);
+
+	int (*ipa_del_nat_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+	int (*ipa_del_ipv6ct_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+	int (*ipa_nat_mdfy_pdn)(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
+
+	int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff,
+		ipa_msg_free_fn callback);
+
+	int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta,
+		ipa_msg_pull_fn callback);
+
+	int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta);
+
+	int (*ipa_register_intf)(const char *name,
+		const struct ipa_tx_intf *tx,
+		const struct ipa_rx_intf *rx);
+
+	int (*ipa_register_intf_ext)(const char *name,
+		const struct ipa_tx_intf *tx,
+		const struct ipa_rx_intf *rx,
+		const struct ipa_ext_intf *ext);
+
+	int (*ipa_deregister_intf)(const char *name);
+
+	int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode);
+
+	int (*ipa_set_qcncm_ndp_sig)(char sig[3]);
+
+	int (*ipa_set_single_ndp_per_mbim)(bool enable);
+
+	int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+	int (*ipa_tx_dp_mul)(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
+
+	void (*ipa_free_skb)(struct ipa_rx_data *data);
+
+	int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in,
+		u32 *clnt_hdl);
+
+	int (*ipa_teardown_sys_pipe)(u32 clnt_hdl);
+
+	int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in,
+		unsigned long *ipa_bam_hdl,
+		u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+	int (*ipa_sys_teardown)(u32 clnt_hdl);
+
+	int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+		unsigned long gsi_ev_hdl);
+
+	int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+
+	int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_enable_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_disable_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_resume_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
+
+	int (*ipa_uc_bw_monitor)(struct ipa_wdi_bw_info *info);
+
+	int (*ipa_set_wlan_tx_info)(struct ipa_wdi_tx_info *info);
+
+	u16 (*ipa_get_smem_restr_bytes)(void);
+
+	int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
+		uint64_t num_bytes);
+
+	int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
+
+	int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
+
+	int (*ipa_uc_dereg_rdyCB)(void);
+
+	int (*teth_bridge_init)(struct teth_bridge_init_params *params);
+
+	int (*teth_bridge_disconnect)(enum ipa_client_type client);
+
+	int (*teth_bridge_connect)(
+		struct teth_bridge_connect_params *connect_params);
+
+	void (*ipa_set_client)(
+		int index, enum ipacm_client_enum client, bool uplink);
+
+	enum ipacm_client_enum (*ipa_get_client)(int pipe_idx);
+
+	bool (*ipa_get_client_uplink)(int pipe_idx);
+
+	int (*ipa_dma_init)(void);
+
+	int (*ipa_dma_enable)(void);
+
+	int (*ipa_dma_disable)(void);
+
+	int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len);
+
+	int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param);
+
+	int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len);
+
+	void (*ipa_dma_destroy)(void);
+
+	bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client);
+
+	int (*ipa_generate_tag_process)(void);
+
+	int (*ipa_disable_sps_pipe)(enum ipa_client_type client);
+
+	void (*ipa_set_tag_process_before_gating)(bool val);
+
+	int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params);
+
+	int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+	int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl);
+
+	bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client);
+
+	int (*ipa_qmi_disable_force_clear)(u32 request_id);
+
+	int (*ipa_qmi_enable_force_clear_datapath_send)(
+		struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+	int (*ipa_qmi_disable_force_clear_datapath_send)(
+		struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+	bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client);
+
+	int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client);
+
+	int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client);
+
+	void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb);
+
+	int (*ipa_mhi_query_ch_info)(enum ipa_client_type client,
+			struct gsi_chan_info *ch_info);
+
+	int (*ipa_mhi_resume_channels_internal)(
+			enum ipa_client_type client,
+			bool LPTransitionRejected,
+			bool brstmode_enabled,
+			union __packed gsi_channel_scratch ch_scratch,
+			u8 index);
+
+	int  (*ipa_mhi_destroy_channel)(enum ipa_client_type client);
+
+	int (*ipa_uc_mhi_send_dl_ul_sync_info)
+		(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+
+	int (*ipa_uc_mhi_init)
+		(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+
+	void (*ipa_uc_mhi_cleanup)(void);
+
+	int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size);
+
+	int (*ipa_uc_mhi_reset_channel)(int channelHandle);
+
+	int (*ipa_uc_mhi_suspend_channel)(int channelHandle);
+
+	int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle);
+
+	int (*ipa_uc_state_check)(void);
+
+	int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in);
+
+	int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+	int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt);
+
+	int (*ipa_restore_suspend_handler)(void);
+
+	void (*ipa_bam_reg_dump)(void);
+
+	int (*ipa_get_ep_mapping)(enum ipa_client_type client);
+
+	bool (*ipa_is_ready)(void);
+
+	void (*ipa_proxy_clk_vote)(void);
+
+	void (*ipa_proxy_clk_unvote)(void);
+
+	bool (*ipa_is_client_handle_valid)(u32 clnt_hdl);
+
+	enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
+
+	bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
+
+	enum ipa_transport_type (*ipa_get_transport_type)(void);
+
+	int (*ipa_ap_suspend)(struct device *dev);
+
+	int (*ipa_ap_resume)(struct device *dev);
+
+	int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
+
+	int (*ipa_start_gsi_channel)(u32 clnt_hdl);
+
+	struct iommu_domain *(*ipa_get_smmu_domain)(void);
+
+	int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
+						uint32_t agg_count);
+
+	struct device *(*ipa_get_dma_dev)(void);
+
+	int (*ipa_release_wdi_mapping)(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info);
+
+	int (*ipa_create_wdi_mapping)(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info);
+
+	const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)
+		(enum ipa_client_type client);
+
+	int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
+		void *user_data);
+
+	void (*ipa_inc_client_enable_clks)(
+		struct ipa_active_client_logging_info *id);
+
+	void (*ipa_dec_client_disable_clks)(
+		struct ipa_active_client_logging_info *id);
+
+	int (*ipa_inc_client_enable_clks_no_block)(
+		struct ipa_active_client_logging_info *id);
+
+	int (*ipa_suspend_resource_no_block)(
+		enum ipa_rm_resource_name resource);
+
+	int (*ipa_resume_resource)(enum ipa_rm_resource_name name);
+
+	int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource);
+
+	int (*ipa_set_required_perf_profile)(
+		enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps);
+
+	void *(*ipa_get_ipc_logbuf)(void);
+
+	void *(*ipa_get_ipc_logbuf_low)(void);
+
+	int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
+
+	void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+
+	int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp);
+
+	int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params);
+
+	struct device *(*ipa_get_pdev)(void);
+
+	int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data),
+		void *user_data);
+
+	void (*ipa_ntn_uc_dereg_rdyCB)(void);
+
+	int (*ipa_conn_wdi_pipes)(struct ipa_wdi_conn_in_params *in,
+		struct ipa_wdi_conn_out_params *out,
+		ipa_wdi_meter_notifier_cb wdi_notify);
+
+	int (*ipa_disconn_wdi_pipes)(int ipa_ep_idx_tx,
+		int ipa_ep_idx_rx);
+
+	int (*ipa_enable_wdi_pipes)(int ipa_ep_idx_tx,
+		int ipa_ep_idx_rx);
+
+	int (*ipa_disable_wdi_pipes)(int ipa_ep_idx_tx,
+		int ipa_ep_idx_rx);
+
+	int (*ipa_tz_unlock_reg)(struct ipa_tz_unlock_reg_info *reg_info,
+		u16 num_regs);
+
+	int (*ipa_get_smmu_params)(struct ipa_smmu_in_params *in,
+		struct ipa_smmu_out_params *out);
+
+	int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
+
+	int (*ipa_wigig_internal_init)(
+		struct ipa_wdi_uc_ready_params *inout,
+		ipa_wigig_misc_int_cb int_notify,
+		phys_addr_t *uc_db_pa);
+
+	int (*ipa_conn_wigig_rx_pipe_i)(void *in,
+		struct ipa_wigig_conn_out_params *out,
+		struct dentry **parent);
+
+	int (*ipa_conn_wigig_client_i)(void *in,
+		struct ipa_wigig_conn_out_params *out,
+		ipa_notify_cb tx_notify,
+		void *priv);
+
+	int (*ipa_disconn_wigig_pipe_i)(enum ipa_client_type client,
+		struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+		void *dbuff);
+
+	int (*ipa_wigig_uc_msi_init)(bool init,
+		phys_addr_t periph_baddr_pa,
+		phys_addr_t pseudo_cause_pa,
+		phys_addr_t int_gen_tx_pa,
+		phys_addr_t int_gen_rx_pa,
+		phys_addr_t dma_ep_misc_pa);
+
+	int (*ipa_enable_wigig_pipe_i)(enum ipa_client_type client);
+
+	int (*ipa_disable_wigig_pipe_i)(enum ipa_client_type client);
+
+	void (*ipa_register_client_callback)(
+		int (*client_cb)(bool is_lock),
+		bool (*teth_port_state)(void), enum ipa_client_type client);
+
+	void (*ipa_deregister_client_callback)(enum ipa_client_type client);
+
+	bool (*ipa_get_lan_rx_napi)(void);
+
+	int (*ipa_uc_debug_stats_alloc)(
+		struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
+
+	int (*ipa_uc_debug_stats_dealloc)(uint32_t prot_id);
+
+	void (*ipa_get_gsi_stats)(int prot_id,
+		struct ipa_uc_dbg_ring_stats *stats);
+
+	int (*ipa_get_prot_id)(enum ipa_client_type client);
+};
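+
+/*
+ * Dispatch sketch (illustrative only; the names below are assumptions,
+ * not part of this header): the generic IPA API layer keeps a single
+ * controller instance, filled in by the HW-specific probe, and routes
+ * each exported call through the matching function pointer.
+ */
+#if 0	/* example only, never compiled */
+static struct ipa_api_controller *ipa_api_ctrl;
+
+int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_stop_gsi_channel)
+		return -EPERM;
+	return ipa_api_ctrl->ipa_stop_gsi_channel(clnt_hdl);
+}
+#endif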
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
+static inline int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
+
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+#endif /* _IPA_API_H_ */

+ 10 - 0
ipa/ipa_clients/Makefile

@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_IPA_USB) += ipausbm.o
+ipausbm-objs := ipa_usb.o
+
+obj-$(CONFIG_RNDIS_IPA) += rndisipam.o
+rndisipam-objs := rndis_ipa.o
+
+obj-$(CONFIG_ECM_IPA) += ecmipam.o
+ecmipam-objs := ecm_ipa.o

+ 1487 - 0
ipa/ipa_clients/ecm_ipa.c

@@ -0,0 +1,1487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/ecm_ipa.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define DRIVER_NAME "ecm_ipa"
+#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
+#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
+#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+#define TX_TIMEOUT (5 * HZ)
+
+#define IPA_ECM_IPC_LOG_PAGES 50
+
+#define IPA_ECM_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
+
+static void *ipa_ecm_logbuf;
+
+#define ECM_IPA_DEBUG(fmt, args...) \
+	do { \
+		pr_debug(DRIVER_NAME " %s:%d "\
+			fmt, __func__, __LINE__, ## args);\
+		if (ipa_ecm_logbuf) { \
+			IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \
+				DRIVER_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define ECM_IPA_DEBUG_XMIT(fmt, args...) \
+	pr_debug(DRIVER_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define ECM_IPA_INFO(fmt, args...) \
+	do { \
+		pr_info(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args);\
+		if (ipa_ecm_logbuf) { \
+			IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \
+				DRIVER_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define ECM_IPA_ERROR(fmt, args...) \
+	do { \
+		pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args);\
+		if (ipa_ecm_logbuf) { \
+			IPA_ECM_IPC_LOGGING(ipa_ecm_logbuf, \
+				DRIVER_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define NULL_CHECK(ptr) \
+	do { \
+		if (!(ptr)) { \
+			ECM_IPA_ERROR("null pointer %s\n", #ptr); \
+			ret = -EINVAL; \
+		} \
+	} \
+	while (0)
+
+#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n")
+#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n")
+
+/**
+ * enum ecm_ipa_state - specify the current driver internal state
+ *  which is guarded by a state machine.
+ *
+ * The driver internal state changes in response to its external API usage.
+ * The driver saves its internal state to guard against illegal call
+ * sequences by the caller.
+ * states:
+ * UNLOADED is the first state, which is the default one, and is also the
+ *  state after the driver gets unloaded (cleanup).
+ * INITIALIZED is the driver state once it has finished registering
+ *  the network device and all internal data structures were initialized.
+ * CONNECTED is the driver state once the USB pipes were connected to IPA.
+ * UP is the driver state after the interface mode was set to UP but the
+ *  pipes are not connected yet - this is a meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipes were connected and
+ *  the interface got an UP request from the network stack. This is the
+ *  driver's idle operation state, which allows it to transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum ecm_ipa_state {
+	ECM_IPA_UNLOADED = 0,
+	ECM_IPA_INITIALIZED,
+	ECM_IPA_CONNECTED,
+	ECM_IPA_UP,
+	ECM_IPA_CONNECTED_AND_UP,
+	ECM_IPA_INVALID,
+};
+
+/**
+ * enum ecm_ipa_operation - enumerations used to describe the API operation
+ *
+ * Those enums are used as input for the driver state machine.
+ */
+enum ecm_ipa_operation {
+	ECM_IPA_INITIALIZE,
+	ECM_IPA_CONNECT,
+	ECM_IPA_OPEN,
+	ECM_IPA_STOP,
+	ECM_IPA_DISCONNECT,
+	ECM_IPA_CLEANUP,
+};
+
+#define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \
+	ECM_IPA_DEBUG("Driver state - %s\n",\
+	ecm_ipa_state_string((ecm_ipa_ctx)->state))
+
+/**
+ * struct ecm_ipa_dev - main driver context parameters
+ * @net: network interface struct implemented by this driver
+ * @directory: debugfs directory for various debugging switches
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: saved handle for IPA pipe operations
+ * @ipa_to_usb_hdl: saved handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: maximum number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which the
+ *  netdev queue is restarted (after being stopped due to outstanding_high
+ *  being reached)
+ * @state: current state of the ecm_ipa driver
+ * @device_ready_notify: callback supplied by the USB core driver.
+ * This callback shall be called by the netdev once the netdev internal
+ * state is changed to ECM_IPA_CONNECTED_AND_UP
+ * @ipa_to_usb_client: consumer client
+ * @usb_to_ipa_client: producer client
+ * @pm_hdl: handle for IPA PM
+ * @is_vlan_mode: does the driver need to work in VLAN mode?
+ * @netif_rx_function: holds the correct network stack API, needed for NAPI
+ */
+struct ecm_ipa_dev {
+	struct net_device *net;
+	struct dentry *directory;
+	u32 eth_ipv4_hdr_hdl;
+	u32 eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+	atomic_t outstanding_pkts;
+	u8 outstanding_high;
+	u8 outstanding_low;
+	enum ecm_ipa_state state;
+	void (*device_ready_notify)(void);
+	enum ipa_client_type ipa_to_usb_client;
+	enum ipa_client_type usb_to_ipa_client;
+	u32 pm_hdl;
+	bool is_vlan_mode;
+	int (*netif_rx_function)(struct sk_buff *skb);
+};
+
+static int ecm_ipa_open(struct net_device *net);
+static void ecm_ipa_packet_receive_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_complete_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_timeout(struct net_device *net);
+static int ecm_ipa_stop(struct net_device *net);
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_rules_cfg
+	(struct ecm_ipa_dev *ecm_ipa_ctx, const void *dst_mac,
+		const void *src_mac);
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_deregister_properties(void);
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
+static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
+static netdev_tx_t ecm_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net);
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+	bool is_vlan_mode);
+static int ecm_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr, u8 device_ethaddr[]);
+static enum ecm_ipa_state ecm_ipa_next_state
+	(enum ecm_ipa_state current_state, enum ecm_ipa_operation operation);
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state);
+static int ecm_ipa_init_module(void);
+static void ecm_ipa_cleanup_module(void);
+
+static const struct net_device_ops ecm_ipa_netdev_ops = {
+	.ndo_open		= ecm_ipa_open,
+	.ndo_stop		= ecm_ipa_stop,
+	.ndo_start_xmit = ecm_ipa_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_tx_timeout = ecm_ipa_tx_timeout,
+	.ndo_get_stats = ecm_ipa_get_stats,
+};
+
+static const struct file_operations ecm_ipa_debugfs_atomic_ops = {
+	.open = ecm_ipa_debugfs_atomic_open,
+	.read = ecm_ipa_debugfs_atomic_read,
+};
+
+static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * ecm_ipa_init() - create network device and initializes internal
+ *  data structures
+ * @params: in/out parameters required for ecm_ipa initialization
+ *
+ * Shall be called prior to pipe connection.
+ * The out parameters (the callbacks) shall be supplied to ipa_connect.
+ * Detailed description:
+ *  - allocate the network device
+ *  - set default values for driver internals
+ *  - create debugfs folder and files
+ *  - add header insertion rules for IPA driver (based on host/device
+ *    Ethernet addresses given in input params)
+ *  - register tx/rx properties to IPA driver (will be later used
+ *    by IPA configuration manager to configure the rest of the IPA rules)
+ *  - set the carrier state to "off" (until ecm_ipa_connect is called)
+ *  - register the network device
+ *  - set the out parameters
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+	int result = 0;
+	struct net_device *net;
+	struct ecm_ipa_dev *ecm_ipa_ctx;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_DEBUG("%s initializing\n", DRIVER_NAME);
+	ret = 0;
+	NULL_CHECK(params);
+	if (ret)
+		return ret;
+
+	ECM_IPA_DEBUG
+		("host_ethaddr=%pM, device_ethaddr=%pM\n",
+		params->host_ethaddr,
+		params->device_ethaddr);
+
+	net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
+	if (!net) {
+		result = -ENOMEM;
+		ECM_IPA_ERROR("fail to allocate etherdev\n");
+		goto fail_alloc_etherdev;
+	}
+	ECM_IPA_DEBUG("network device was successfully allocated\n");
+
+	ecm_ipa_ctx = netdev_priv(net);
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("fail to extract netdev priv\n");
+		result = -ENOMEM;
+		goto fail_netdev_priv;
+	}
+	memset(ecm_ipa_ctx, 0, sizeof(*ecm_ipa_ctx));
+	ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %pK\n", ecm_ipa_ctx);
+
+	ecm_ipa_ctx->net = net;
+	ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+	snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
+	net->netdev_ops = &ecm_ipa_netdev_ops;
+	net->watchdog_timeo = TX_TIMEOUT;
+	if (ipa_get_lan_rx_napi()) {
+		ecm_ipa_ctx->netif_rx_function = netif_receive_skb;
+		ECM_IPA_DEBUG("LAN RX NAPI enabled = True");
+	} else {
+		ecm_ipa_ctx->netif_rx_function = netif_rx_ni;
+		ECM_IPA_DEBUG("LAN RX NAPI enabled = False");
+	}
+	ECM_IPA_DEBUG("internal data structures were initialized\n");
+
+	if (!params->device_ready_notify)
+		ECM_IPA_DEBUG("device_ready_notify() was not supplied");
+	ecm_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+	ecm_ipa_debugfs_init(ecm_ipa_ctx);
+
+	result = ecm_ipa_set_device_ethernet_addr
+		(net->dev_addr, params->device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+	if (ipa_is_vlan_mode(IPA_VLAN_IF_ECM, &ecm_ipa_ctx->is_vlan_mode)) {
+		ECM_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n");
+		goto fail_get_vlan_mode;
+	}
+	ECM_IPA_DEBUG("is vlan mode %d\n", ecm_ipa_ctx->is_vlan_mode);
+
+	result = ecm_ipa_rules_cfg
+		(ecm_ipa_ctx, params->host_ethaddr, params->device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("fail on ipa rules set\n");
+		goto fail_rules_cfg;
+	}
+	ECM_IPA_DEBUG("Ethernet header insertion set\n");
+
+	netif_carrier_off(net);
+	ECM_IPA_DEBUG("netif_carrier_off() was called\n");
+
+	netif_stop_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("netif_stop_queue() was called");
+
+	result = register_netdev(net);
+	if (result) {
+		ECM_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	ECM_IPA_DEBUG("register_netdev succeeded\n");
+
+	params->ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify;
+	params->ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify;
+	params->private = (void *)ecm_ipa_ctx;
+	params->skip_ep_cfg = false;
+	ecm_ipa_ctx->state = ECM_IPA_INITIALIZED;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	ECM_IPA_INFO("ECM_IPA was initialized successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_register_netdev:
+	ecm_ipa_rules_destroy(ecm_ipa_ctx);
+fail_rules_cfg:
+fail_get_vlan_mode:
+fail_set_device_ethernet:
+	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+fail_netdev_priv:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return result;
+}
+EXPORT_SYMBOL(ecm_ipa_init);
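+
+/*
+ * Bring-up sketch, assuming a USB function driver context (the function
+ * and variable names here are illustrative and not part of this driver):
+ * ecm_ipa_init() is called first; the callbacks it returns are later
+ * handed to the USB core when the IPA pipes are connected, and the
+ * private pointer is kept for the subsequent ecm_ipa_connect() call.
+ */
+#if 0	/* example only, never compiled */
+static void *ecm_priv;
+
+static int example_ecm_bringup(const u8 *host_mac, const u8 *dev_mac)
+{
+	struct ecm_ipa_params params;
+	int ret;
+
+	memset(&params, 0, sizeof(params));
+	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
+	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
+	ret = ecm_ipa_init(&params);
+	if (ret)
+		return ret;
+	/* params.ecm_ipa_rx_dp_notify/params.ecm_ipa_tx_dp_notify are
+	 * passed on to the pipe-connect sequence later.
+	 */
+	ecm_priv = params.private;
+	return 0;
+}
+#endif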
+
+/**
+ * ecm_ipa_connect() - notify ecm_ipa for IPA<->USB pipes connection
+ * @usb_to_ipa_hdl: handle of IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle of IPA driver client for IPA->USB
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *  parameter holds the network device pointer.
+ *
+ * Once USB driver finishes the pipe connection between IPA core
+ * and USB core this method shall be called in order to
+ * allow ecm_ipa to complete the data path configuration.
+ * Caller should make sure that it is calling this function
+ * from a context that allows it to handle device_ready_notify().
+ * Detailed description:
+ *  - configure the IPA end-points register
+ *  - notify the Linux kernel for "carrier_on"
+ *  After this function is done the driver state changes to "Connected".
+ *  This API is expected to be called after ecm_ipa_init() or
+ *  after a call to ecm_ipa_disconnect.
+ */
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+	struct ipa_ecm_msg *ecm_msg;
+	struct ipa_msg_meta msg_meta;
+	int retval;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+	ret = 0;
+	NULL_CHECK(priv);
+	if (ret)
+		return ret;
+	ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%pK\n",
+		      usb_to_ipa_hdl, ipa_to_usb_hdl, priv);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CONNECT);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't call connect before calling initialize\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	if (!ipa_is_client_handle_valid(usb_to_ipa_hdl)) {
+		ECM_IPA_ERROR
+			("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (!ipa_is_client_handle_valid(ipa_to_usb_hdl)) {
+		ECM_IPA_ERROR
+			("ipa_to_usb_hdl(%d) is not a valid ipa handle\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+
+	ecm_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	ecm_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+
+	ecm_ipa_ctx->ipa_to_usb_client = ipa_get_client_mapping(ipa_to_usb_hdl);
+	if (ecm_ipa_ctx->ipa_to_usb_client < 0) {
+		ECM_IPA_ERROR(
+			"Error getting IPA->USB client from handle %d\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("ipa_to_usb_client = %d\n",
+		      ecm_ipa_ctx->ipa_to_usb_client);
+
+	ecm_ipa_ctx->usb_to_ipa_client = ipa_get_client_mapping(usb_to_ipa_hdl);
+	if (ecm_ipa_ctx->usb_to_ipa_client < 0) {
+		ECM_IPA_ERROR(
+			"Error getting USB->IPA client from handle %d\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
+		      ecm_ipa_ctx->usb_to_ipa_client);
+
+	retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
+
+	if (retval) {
+		ECM_IPA_ERROR("fail register PM client\n");
+		return retval;
+	}
+	ECM_IPA_DEBUG("PM client registered\n");
+
+	retval = ecm_ipa_register_properties(ecm_ipa_ctx);
+	if (retval) {
+		ECM_IPA_ERROR("fail on properties set\n");
+		goto fail_register_pm;
+	}
+	ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
+
+	retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl,
+		ecm_ipa_ctx->is_vlan_mode);
+	if (retval) {
+		ECM_IPA_ERROR("fail on ep cfg\n");
+		goto fail;
+	}
+	ECM_IPA_DEBUG("end-point configured\n");
+
+	netif_carrier_on(ecm_ipa_ctx->net);
+
+	ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL);
+	if (!ecm_msg) {
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_CONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+	if (retval) {
+		ECM_IPA_ERROR("fail to send ECM_CONNECT message\n");
+		kfree(ecm_msg);
+		goto fail;
+	}
+
+	if (!netif_carrier_ok(ecm_ipa_ctx->net)) {
+		ECM_IPA_ERROR("netif_carrier_ok error\n");
+		retval = -EBUSY;
+		goto fail;
+	}
+	ECM_IPA_DEBUG("carrier_on notified\n");
+
+	if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+		ecm_ipa_enable_data_path(ecm_ipa_ctx);
+	else
+		ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+	ECM_IPA_INFO("ECM_IPA was connected successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+
+fail:
+	ecm_ipa_deregister_properties();
+fail_register_pm:
+	ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
+	return retval;
+}
+EXPORT_SYMBOL(ecm_ipa_connect);
+
+/**
+ * ecm_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ */
+static int ecm_ipa_open(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx;
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ecm_ipa_ctx = netdev_priv(net);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_OPEN);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't bring driver up before initialize\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+		ecm_ipa_enable_data_path(ecm_ipa_ctx);
+	else
+		ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * @skb: packet received from Linux network stack
+ * @net: the network device being used to send this packet
+ *
+ * Several conditions must be met in order to send the packet to IPA:
+ * - Transmit queue for the network driver is currently
+ *   in "send" state
+ * - The driver internal state is "CONNECTED_AND_UP".
+ * - The outstanding-packets high boundary was not reached.
+ *
+ * In case all of the above conditions are met, the network driver will
+ * send the packet by using the IPA API for Tx.
+ * In case the outstanding-packets high boundary is reached, the driver will
+ * stop the send queue until enough packets were processed by the IPA core.
+ */
+static netdev_tx_t ecm_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+
+	netif_trans_update(net);
+
+	ECM_IPA_DEBUG_XMIT
+		("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	if (unlikely(netif_queue_stopped(net))) {
+		ECM_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_ERROR("Missing pipe connected and/or iface up\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
+	if (ret) {
+		ECM_IPA_DEBUG("Failed to activate PM client\n");
+		netif_stop_queue(net);
+		goto fail_pm_activate;
+	}
+
+	if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >=
+					ecm_ipa_ctx->outstanding_high) {
+		ECM_IPA_DEBUG
+			("outstanding high (%d)- stopping\n",
+			ecm_ipa_ctx->outstanding_high);
+		netif_stop_queue(net);
+		status = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	if (ecm_ipa_ctx->is_vlan_mode)
+		if (unlikely(skb->protocol != htons(ETH_P_8021Q)))
+			ECM_IPA_DEBUG(
+				"ether_type != ETH_P_8021Q && vlan, prot = 0x%X\n"
+				, skb->protocol);
+
+	ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
+	if (ret) {
+		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+
+	atomic_inc(&ecm_ipa_ctx->outstanding_pkts);
+
+	status = NETDEV_TX_OK;
+	goto out;
+
+fail_tx_packet:
+out:
+	ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
+fail_pm_activate:
+	return status;
+}
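+
+/*
+ * Flow-control note: the Tx queue is stopped above once outstanding_pkts
+ * reaches outstanding_high, and is woken again from the Tx-complete
+ * callback once it drops below outstanding_low (see
+ * ecm_ipa_tx_complete_notify() below).
+ */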
+
+/**
+ * ecm_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data pointing
+ * to Ethernet packet frame.
+ */
+static void ecm_ipa_packet_receive_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int result;
+	unsigned int packet_len;
+
+	if (!skb) {
+		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+		return;
+	}
+
+	packet_len = skb->len;
+	ECM_IPA_DEBUG("packet RX, len=%d\n", skb->len);
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_DEBUG("Missing pipe connected and/or iface up\n");
+		return;
+	}
+
+	if (unlikely(evt != IPA_RECEIVE)) {
+		ECM_IPA_ERROR("A non-IPA_RECEIVE event in ecm_ipa_receive\n");
+		return;
+	}
+
+	skb->dev = ecm_ipa_ctx->net;
+	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
+
+	result = ecm_ipa_ctx->netif_rx_function(skb);
+	if (unlikely(result))
+		ECM_IPA_ERROR("fail on netif_rx_function\n");
+	ecm_ipa_ctx->net->stats.rx_packets++;
+	ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
+}
+
+/** ecm_ipa_stop() - called when network device transitions to the down
+ *     state.
+ *  @net: the network device being stopped.
+ *
+ * This API is used by the Linux network stack to notify the network driver
+ * that its state was changed to "down".
+ * The driver will stop the "send" queue and change its internal
+ * state back to "Connected" (or "Initialized" if the pipes were not
+ * connected).
+ */
+static int ecm_ipa_stop(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_STOP);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't do network interface down without up\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	netif_stop_queue(net);
+	ECM_IPA_DEBUG("network device stopped\n");
+
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/** ecm_ipa_disconnect() - called when the USB cable is unplugged.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *  parameter holds the network device pointer.
+ *
+ * Once the USB cable is unplugged the USB driver will notify the network
+ * interface driver.
+ * The internal driver state will return to its initialized state, the
+ * Linux network stack will be informed of carrier off, and the send
+ * queue will be stopped.
+ */
+int ecm_ipa_disconnect(void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+	struct ipa_ecm_msg *ecm_msg;
+	struct ipa_msg_meta msg_meta;
+	int retval;
+	int outstanding_dropped_pkts;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+	ret = 0;
+	NULL_CHECK(ecm_ipa_ctx);
+	if (ret)
+		return ret;
+	ECM_IPA_DEBUG("priv=0x%pK\n", priv);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_DISCONNECT);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	netif_carrier_off(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("carrier_off notification was sent\n");
+
+	ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL);
+	if (!ecm_msg)
+		return -ENOMEM;
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_DISCONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+	if (retval) {
+		ECM_IPA_ERROR("fail to send ECM_DISCONNECT message\n");
+		kfree(ecm_msg);
+		return -EPERM;
+	}
+
+	netif_stop_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("queue stopped\n");
+
+	ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
+
+	outstanding_dropped_pkts =
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts);
+	ecm_ipa_ctx->net->stats.tx_errors += outstanding_dropped_pkts;
+	atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+
+	ECM_IPA_INFO("ECM_IPA was disconnected successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+/**
+ * ecm_ipa_cleanup() - unregister the network interface driver and free
+ *  internal data structs.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *   parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g. when the USB composition does not support ECM.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ *  - remove the debugfs entries
+ *  - deregister the network interface from Linux network stack
+ *  - free all internal data structs
+ */
+void ecm_ipa_cleanup(void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_DEBUG("priv=0x%pK\n", priv);
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx NULL pointer\n");
+		return;
+	}
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CLEANUP);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't clean driver without cable disconnect\n");
+		return;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	ecm_ipa_rules_destroy(ecm_ipa_ctx);
+	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+
+	unregister_netdev(ecm_ipa_ctx->net);
+	free_netdev(ecm_ipa_ctx->net);
+
+	ECM_IPA_INFO("ECM_IPA was destroyed successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+}
+EXPORT_SYMBOL(ecm_ipa_cleanup);
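+
+/*
+ * Teardown sketch (illustrative; ecm_priv is the pointer saved in the
+ * bring-up sketch above): per the comments above, the pipes must be
+ * disconnected before cleanup runs.
+ */
+#if 0	/* example only, never compiled */
+static void example_ecm_teardown(void)
+{
+	ecm_ipa_disconnect(ecm_priv);
+	/* ...the USB driver disconnects the IPA pipes here... */
+	ecm_ipa_cleanup(ecm_priv);
+}
+#endif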
+
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	if (ecm_ipa_ctx->device_ready_notify) {
+		ecm_ipa_ctx->device_ready_notify();
+		ECM_IPA_DEBUG("USB device_ready_notify() was called\n");
+	} else {
+		ECM_IPA_DEBUG("device_ready_notify() not supplied\n");
+	}
+
+	netif_start_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("queue started\n");
+}
+
+static void ecm_ipa_prepare_header_insertion(
+	int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode)
+{
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *eth_vlan_hdr;
+
+	ECM_IPA_LOG_ENTRY();
+
+	add_hdr->is_partial = 0;
+	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
+	add_hdr->is_eth2_ofst_valid = true;
+	add_hdr->eth2_ofst = 0;
+
+	if (is_vlan_mode) {
+		eth_vlan_hdr = (struct vlan_ethhdr *)add_hdr->hdr;
+		memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN);
+		eth_vlan_hdr->h_vlan_encapsulated_proto =
+			htons(eth_type);
+		eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		add_hdr->hdr_len = VLAN_ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_802_1Q;
+	} else {
+		eth_hdr = (struct ethhdr *)add_hdr->hdr;
+		memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+		eth_hdr->h_proto = htons(eth_type);
+		add_hdr->hdr_len = ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	}
+	ECM_IPA_LOG_EXIT();
+}
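+
+/*
+ * Byte layout of the committed templates (a sketch derived from the
+ * assignments above):
+ *   VLAN mode (VLAN_ETH_HLEN = 18 bytes):
+ *     [dst MAC 6][src MAC 6][0x8100 2][TCI 2, left zero][EtherType 2]
+ *   non-VLAN (ETH_HLEN = 14 bytes):
+ *     [dst MAC 6][src MAC 6][EtherType 2]
+ */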
+
+/**
+ * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties;
+ *  headers will be committed to HW
+ * @ecm_ipa_ctx: main driver context parameters
+ * @dst_mac: destination MAC address
+ * @src_mac: source MAC address
+ *
+ * Returns negative errno, or zero on success
+ */
+static int ecm_ipa_rules_cfg
+	(struct ecm_ipa_dev *ecm_ipa_ctx,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+	hdrs = kzalloc
+		(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto out;
+	}
+
+	ipv4_hdr = &hdrs->hdr[0];
+	ecm_ipa_prepare_header_insertion(
+		ETH_P_IP, ECM_IPA_IPV4_HDR_NAME,
+		ipv4_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode);
+
+	ipv6_hdr = &hdrs->hdr[1];
+	ecm_ipa_prepare_header_insertion(
+		ETH_P_IPV6, ECM_IPA_IPV6_HDR_NAME,
+		ipv6_hdr, dst_mac, src_mac, ecm_ipa_ctx->is_vlan_mode);
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ECM_IPA_ERROR
+			("Fail on Header-Insertion ipv4(%d)\n",
+			ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ECM_IPA_ERROR
+			("Fail on Header-Insertion ipv6(%d)\n",
+			ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	ecm_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	ecm_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+	ECM_IPA_LOG_EXIT();
+out_free_mem:
+	kfree(hdrs);
+out:
+	return result;
+}
+
+/**
+ * ecm_ipa_rules_destroy() - remove the IPA core configuration done for
+ *  the driver data path.
+ *  @ecm_ipa_ctx: the driver context
+ *
+ *  Revert the work done on ecm_ipa_rules_cfg.
+ */
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = ecm_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl;
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		ECM_IPA_ERROR("ipa_del_hdr failed\n");
+	kfree(del_hdr);
+}
+
+/* ecm_ipa_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register ecm0 interface with 2 Tx properties and 2 Rx properties:
+ * The 2 Tx properties are for data flowing from IPA to USB, they
+ * have Header-Insertion properties both for IPv4 and IPv6 Ethernet framing.
+ * The 2 Rx properties are for data flowing from USB to IPA; they have a
+ * simple rule which always "hits".
+ *
+ */
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+
+	if (ecm_ipa_ctx->is_vlan_mode)
+		hdr_l2_type = IPA_HDR_L2_802_1Q;
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	strlcpy
+		(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = hdr_l2_type;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	ipv6_property->hdr_l2_type = hdr_l2_type;
+	strlcpy
+		(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv4_property->hdr_l2_type = hdr_l2_type;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("ecm0", &tx_properties, &rx_properties);
+	if (result)
+		ECM_IPA_ERROR("fail on Tx/Rx properties registration\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return result;
+}
+
+static void ecm_ipa_deregister_properties(void)
+{
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+	result = ipa_deregister_intf("ecm0");
+	if (result)
+		ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
+	ECM_IPA_LOG_EXIT();
+}
+
+/**
+ * ecm_ipa_get_stats() - report the network device statistics
+ * @net: the network device whose statistics are queried
+ *
+ * Returns the statistics structure kept in net->stats.
+ */
+
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+static void ecm_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = p;
+
+	ECM_IPA_LOG_ENTRY();
+	if (event != IPA_PM_CLIENT_ACTIVATED) {
+		ECM_IPA_ERROR("unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	if (netif_queue_stopped(ecm_ipa_ctx->net)) {
+		ECM_IPA_DEBUG("Resource Granted - starting queue\n");
+		netif_start_queue(ecm_ipa_ctx->net);
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
+static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = ecm_ipa_ctx->net->name;
+	pm_reg.user_data = ecm_ipa_ctx;
+	pm_reg.callback = ecm_ipa_pm_cb;
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &ecm_ipa_ctx->pm_hdl);
+	if (result) {
+		ECM_IPA_ERROR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	ipa_pm_deactivate_sync(ecm_ipa_ctx->pm_hdl);
+	ipa_pm_deregister(ecm_ipa_ctx->pm_hdl);
+	ecm_ipa_ctx->pm_hdl = ~0;
+}
+
+
+/**
+ * ecm_ipa_tx_complete_notify() - Tx-complete notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context on the IPA workqueue.
+ */
+static void ecm_ipa_tx_complete_notify
+		(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+
+	if (!skb) {
+		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+		return;
+	}
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
+		return;
+	}
+
+	ECM_IPA_DEBUG
+		("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	if (evt != IPA_WRITE_DONE) {
+		ECM_IPA_ERROR("unsupported event on Tx callback\n");
+		return;
+	}
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_DEBUG
+			("dropping Tx-complete pkt, state=%s",
+			ecm_ipa_state_string(ecm_ipa_ctx->state));
+		goto out;
+	}
+
+	ecm_ipa_ctx->net->stats.tx_packets++;
+	ecm_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) > 0)
+		atomic_dec(&ecm_ipa_ctx->outstanding_pkts);
+
+	if
+		(netif_queue_stopped(ecm_ipa_ctx->net) &&
+		netif_carrier_ok(ecm_ipa_ctx->net) &&
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts)
+		< (ecm_ipa_ctx->outstanding_low)) {
+		ECM_IPA_DEBUG
+			("outstanding low (%d) - waking up queue\n",
+			ecm_ipa_ctx->outstanding_low);
+		netif_wake_queue(ecm_ipa_ctx->net);
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
+
+static void ecm_ipa_tx_timeout(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+
+	ECM_IPA_ERROR
+		("possible IPA stall was detected, %d outstanding\n",
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	net->stats.tx_errors++;
+}
+
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
+
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &ecm_ipa_ctx->outstanding_pkts;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static ssize_t ecm_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+
+	nbytes = scnprintf
+		(atomic_str, sizeof(atomic_str), "%d\n",
+			atomic_read(atomic_var));
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	const mode_t flags_read_write = 0666;
+	const mode_t flags_read_only = 0444;
+	struct dentry *file;
+
+	ECM_IPA_LOG_ENTRY();
+
+	if (!ecm_ipa_ctx)
+		return;
+
+	ecm_ipa_ctx->directory = debugfs_create_dir("ecm_ipa", NULL);
+	if (!ecm_ipa_ctx->directory) {
+		ECM_IPA_ERROR("could not create debugfs directory entry\n");
+		goto fail_directory;
+	}
+	file = debugfs_create_u8
+		("outstanding_high", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_high file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_u8
+		("outstanding_low", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_low);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_low file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_file
+		("outstanding", flags_read_only,
+		ecm_ipa_ctx->directory,
+		ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool("is_vlan_mode", flags_read_only,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->is_vlan_mode);
+	if (!file) {
+		ECM_IPA_ERROR("could not create is_vlan_mode file\n");
+		goto fail_file;
+	}
+
+	ECM_IPA_DEBUG("debugfs entries were created\n");
+	ECM_IPA_LOG_EXIT();
+
+	return;
+fail_file:
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+fail_directory:
+	return;
+}
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+}
+
+#else /* !CONFIG_DEBUG_FS*/
+
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) {}
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ecm_ipa_ep_registers_cfg() - configure the USB end-points for ECM
+ *
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ * @is_vlan_mode: should the driver work in VLAN mode?
+ *
+ * USB to IPA pipe:
+ *  - No de-aggregation
+ *  - Remove Ethernet header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - No aggregation
+ *  - Add Ethernet header
+ */
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+	bool is_vlan_mode)
+{
+	int result = 0;
+	struct ipa_ep_cfg usb_to_ipa_ep_cfg;
+	struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+	uint8_t hdr_add = 0;
+
+
+	ECM_IPA_LOG_ENTRY();
+	if (is_vlan_mode)
+		hdr_add = VLAN_HLEN;
+	memset(&usb_to_ipa_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
+	usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add;
+	usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
+	usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC;
+
+	/* enable hdr_metadata_reg_valid */
+	usb_to_ipa_ep_cfg.hdr.hdr_metadata_reg_valid = true;
+
+	result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA point\n");
+		goto out;
+	}
+	memset(&ipa_to_usb_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
+	ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN + hdr_add;
+	ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure IPA to USB end-point\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+/**
+ * ecm_ipa_set_device_ethernet_addr() - set the device Ethernet address
+ * @dev_ethaddr: destination buffer for the device Ethernet address
+ * @device_ethaddr: the device Ethernet address to set
+ *
+ * Returns 0 for success, negative otherwise
+ */
+static int ecm_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr, u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+	ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr);
+	return 0;
+}
+
+/** ecm_ipa_next_state - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represents the operation being made on the driver
+ *  by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid, this state machine will return
+ * the value ECM_IPA_INVALID to inform the caller of a forbidden sequence.
+ */
+static enum ecm_ipa_state ecm_ipa_next_state
+	(enum ecm_ipa_state current_state, enum ecm_ipa_operation operation)
+{
+	int next_state = ECM_IPA_INVALID;
+
+	switch (current_state) {
+	case ECM_IPA_UNLOADED:
+		if (operation == ECM_IPA_INITIALIZE)
+			next_state = ECM_IPA_INITIALIZED;
+		break;
+	case ECM_IPA_INITIALIZED:
+		if (operation == ECM_IPA_CONNECT)
+			next_state = ECM_IPA_CONNECTED;
+		else if (operation == ECM_IPA_OPEN)
+			next_state = ECM_IPA_UP;
+		else if (operation == ECM_IPA_CLEANUP)
+			next_state = ECM_IPA_UNLOADED;
+		break;
+	case ECM_IPA_CONNECTED:
+		if (operation == ECM_IPA_DISCONNECT)
+			next_state = ECM_IPA_INITIALIZED;
+		else if (operation == ECM_IPA_OPEN)
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+		break;
+	case ECM_IPA_UP:
+		if (operation == ECM_IPA_STOP)
+			next_state = ECM_IPA_INITIALIZED;
+		else if (operation == ECM_IPA_CONNECT)
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+		else if (operation == ECM_IPA_CLEANUP)
+			next_state = ECM_IPA_UNLOADED;
+		break;
+	case ECM_IPA_CONNECTED_AND_UP:
+		if (operation == ECM_IPA_STOP)
+			next_state = ECM_IPA_CONNECTED;
+		else if (operation == ECM_IPA_DISCONNECT)
+			next_state = ECM_IPA_UP;
+		break;
+	default:
+		ECM_IPA_ERROR("State is not supported\n");
+		break;
+	}
+
+	ECM_IPA_DEBUG
+		("state transition ( %s -> %s )- %s\n",
+		ecm_ipa_state_string(current_state),
+		ecm_ipa_state_string(next_state),
+		next_state == ECM_IPA_INVALID ? "Forbidden" : "Allowed");
+
+	return next_state;
+}
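+
+/*
+ * Transition summary (derived from the switch above):
+ *   UNLOADED         --initialize--> INITIALIZED
+ *   INITIALIZED      --connect--> CONNECTED, --open--> UP,
+ *                    --cleanup--> UNLOADED
+ *   CONNECTED        --open--> CONNECTED_AND_UP, --disconnect--> INITIALIZED
+ *   UP               --connect--> CONNECTED_AND_UP, --stop--> INITIALIZED,
+ *                    --cleanup--> UNLOADED
+ *   CONNECTED_AND_UP --stop--> CONNECTED, --disconnect--> UP
+ */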
+
+/**
+ * ecm_ipa_state_string - return the state string representation
+ * @state: enum which describe the state
+ */
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state)
+{
+	switch (state) {
+	case ECM_IPA_UNLOADED:
+		return "ECM_IPA_UNLOADED";
+	case ECM_IPA_INITIALIZED:
+		return "ECM_IPA_INITIALIZED";
+	case ECM_IPA_CONNECTED:
+		return "ECM_IPA_CONNECTED";
+	case ECM_IPA_UP:
+		return "ECM_IPA_UP";
+	case ECM_IPA_CONNECTED_AND_UP:
+		return "ECM_IPA_CONNECTED_AND_UP";
+	default:
+		return "Not supported";
+	}
+}
+
+/**
+ * ecm_ipa_init_module() - module initialization
+ *
+ */
+static int __init ecm_ipa_init_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	pr_info("ecm driver init\n");
+	ipa_ecm_logbuf = ipc_log_context_create(IPA_ECM_IPC_LOG_PAGES,
+			"ipa_ecm", 0);
+	if (ipa_ecm_logbuf == NULL)
+		ECM_IPA_DEBUG("failed to create IPC log, continue...\n");
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/**
+ * ecm_ipa_cleanup_module() - module cleanup
+ *
+ */
+static void __exit ecm_ipa_cleanup_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	if (ipa_ecm_logbuf)
+		ipc_log_context_destroy(ipa_ecm_logbuf);
+	ipa_ecm_logbuf = NULL;
+	ECM_IPA_LOG_EXIT();
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ECM IPA network interface");
+
+late_initcall(ecm_ipa_init_module);
+module_exit(ecm_ipa_cleanup_module);

+ 1226 - 0
ipa/ipa_clients/ipa_gsb.c

@@ -0,0 +1,1226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define IPA_GSB_DRV_NAME "ipa_gsb"
+
+#define MAX_SUPPORTED_IFACE 5
+
+#define IPA_GSB_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_GSB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_GSB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_GSB_MAX_MSG_LEN 512
+static char dbg_buff[IPA_GSB_MAX_MSG_LEN];
+
+#define IPA_GSB_SKB_HEADROOM 256
+#define IPA_GSB_SKB_DUMMY_HEADER 42
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_AGGR_TIME_LIMIT 1000 /* 1000 us */
+
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+/**
+ * struct stats - driver statistics,
+ * @num_ul_packets: number of uplink packets
+ * @num_dl_packets: number of downlink packets
+ * @num_insufficient_headroom_packets: number of
+ *  packets with insufficient headroom
+ */
+struct stats {
+	u64 num_ul_packets;
+	u64 num_dl_packets;
+	u64 num_insufficient_headroom_packets;
+};
+
+/**
+ * struct ipa_gsb_mux_hdr - ipa gsb mux header,
+ * @iface_hdl: interface handle
+ * @qmap_id: qmap id
+ * @pkt_size: packet size
+ */
+struct ipa_gsb_mux_hdr {
+	u8 iface_hdl;
+	u8 qmap_id;
+	u16 pkt_size;
+};
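+
+/*
+ * The mux header is 4 bytes (u8 + u8 + u16) and travels in front of every
+ * packet crossing the shared GSB pipe pair, letting up to
+ * MAX_SUPPORTED_IFACE interfaces share it; see the partial-header layout
+ * comment in ipa_gsb_commit_partial_hdr() below.
+ */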
+
+/**
+ * struct ipa_gsb_iface_info - GSB interface information
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data. to be used in client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @iface_stats: statistics, how many packets were transmitted
+ * using the SW bridge.
+ * @partial_hdr_hdl: handle for partial header
+ * @wakeup_request: client callback to wakeup
+ * @is_connected: is the interface connected?
+ * @is_resumed: is the interface resumed?
+ * @iface_hdl: interface handle
+ */
+struct ipa_gsb_iface_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 device_ethaddr[ETH_ALEN];
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	struct stats iface_stats;
+	uint32_t partial_hdr_hdl[IPA_IP_MAX];
+	void (*wakeup_request)(void *cl_priv);
+	bool is_connected;
+	bool is_resumed;
+	u8 iface_hdl;
+};
+
+/**
+ * struct ipa_gsb_context - GSB driver context information
+ * @logbuf: buffer of ipc logging
+ * @logbuf_low: buffer of ipc logging (low priority)
+ * @lock: global mutex lock for global variables
+ * @prod_hdl: handle for prod pipe
+ * @cons_hdl: handle for cons pipe
+ * @ipa_sys_desc_size: sys pipe desc size
+ * @num_iface: number of interfaces
+ * @iface_hdl: interface handles
+ * @num_connected_iface: number of connected interfaces
+ * @num_resumed_iface: number of resumed interfaces
+ * @iface: interface information
+ * @iface_lock: interface mutex lock for control path
+ * @iface_spinlock: interface spinlock for data path
+ * @pm_hdl: IPA PM handle
+ * @disconnect_in_progress: is a disconnect currently in progress?
+ * @suspend_in_progress: is a suspend currently in progress?
+ */
+struct ipa_gsb_context {
+	void *logbuf;
+	void *logbuf_low;
+	struct mutex lock;
+	u32 prod_hdl;
+	u32 cons_hdl;
+	u32 ipa_sys_desc_size;
+	int num_iface;
+	bool iface_hdl[MAX_SUPPORTED_IFACE];
+	int num_connected_iface;
+	int num_resumed_iface;
+	struct ipa_gsb_iface_info *iface[MAX_SUPPORTED_IFACE];
+	struct mutex iface_lock[MAX_SUPPORTED_IFACE];
+	spinlock_t iface_spinlock[MAX_SUPPORTED_IFACE];
+	u32 pm_hdl;
+	atomic_t disconnect_in_progress;
+	atomic_t suspend_in_progress;
+};
+
+static struct ipa_gsb_context *ipa_gsb_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t ipa_gsb_debugfs_stats(struct file *file,
+				  char __user *ubuf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	int i, nbytes = 0;
+	struct ipa_gsb_iface_info *iface = NULL;
+	struct stats iface_stats;
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++) {
+		iface = ipa_gsb_ctx->iface[i];
+		if (iface != NULL) {
+			iface_stats = iface->iface_stats;
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"netdev: %s\n",
+				iface->netdev_name);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"UL packets: %lld\n",
+				iface_stats.num_ul_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"DL packets: %lld\n",
+				iface_stats.num_dl_packets);
+
+			nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_GSB_MAX_MSG_LEN - nbytes,
+				"packets with insufficient headroom: %lld\n",
+				iface_stats.num_insufficient_headroom_packets);
+		}
+	}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations ipa_gsb_stats_ops = {
+	.read = ipa_gsb_debugfs_stats,
+};
+
+static void ipa_gsb_debugfs_init(void)
+{
+	const mode_t read_only_mode = 00444;
+
+	dent = debugfs_create_dir("ipa_gsb", NULL);
+	if (IS_ERR(dent)) {
+		IPA_GSB_ERR("fail to create folder ipa_gsb\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_only_mode, dent,
+					NULL, &ipa_gsb_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPA_GSB_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+#else
+static void ipa_gsb_debugfs_init(void)
+{
+}
+
+static void ipa_gsb_debugfs_destroy(void)
+{
+}
+#endif
+
+static int ipa_gsb_driver_init(struct odu_bridge_params *params)
+{
+	int i;
+
+	if (!ipa_is_ready()) {
+		IPA_GSB_ERR("IPA is not ready\n");
+		return -EFAULT;
+	}
+
+	ipa_gsb_ctx = kzalloc(sizeof(*ipa_gsb_ctx),
+		GFP_KERNEL);
+
+	if (!ipa_gsb_ctx)
+		return -ENOMEM;
+
+	mutex_init(&ipa_gsb_ctx->lock);
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++) {
+		mutex_init(&ipa_gsb_ctx->iface_lock[i]);
+		spin_lock_init(&ipa_gsb_ctx->iface_spinlock[i]);
+	}
+	ipa_gsb_debugfs_init();
+
+	return 0;
+}
+
+static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	int i;
+	struct ipa_ioc_add_hdr *hdr;
+
+	if (!iface_info) {
+		IPA_GSB_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+		2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr)
+		return -ENOMEM;
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", iface_info->netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", iface_info->netdev_name);
+	/*
+	 * partial header:
+	 * [hdl][QMAP ID][pkt size][Dummy Header][ETH header]
+	 */
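+	/*
+	 * Resulting template byte offsets: 0 = iface handle, 1 = QMAP ID,
+	 * 2-3 = packet size (matching hdr_ofst_pkt_size = 2 on the cons
+	 * pipe), followed by IPA_GSB_SKB_DUMMY_HEADER dummy bytes and the
+	 * 14-byte Ethernet header filled in below.
+	 */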
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		/*
+		 * Optimization: add dummy header to reserve space
+		 * for rndis header, so we can do the skb_clone
+		 * instead of deep copy.
+		 */
+		hdr->hdr[i].hdr_len = ETH_HLEN +
+			sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		hdr->hdr[i].type = IPA_HDR_L2_ETHERNET_II;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = sizeof(struct ipa_gsb_mux_hdr) +
+			IPA_GSB_SKB_DUMMY_HEADER;
+		/* populate iface handle */
+		hdr->hdr[i].hdr[0] = iface_info->iface_hdl;
+		/* populate src ETH address */
+		memcpy(&hdr->hdr[i].hdr[10 + IPA_GSB_SKB_DUMMY_HEADER],
+			iface_info->device_ethaddr, 6);
+		/* populate Ethertype */
+		if (i == IPA_IP_v4)
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IP);
+		else
+			*(u16 *)(hdr->hdr[i].hdr + 16 +
+				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6);
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_GSB_ERR("fail to add partial headers\n");
+		kfree(hdr);
+		return -EFAULT;
+	}
+
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++)
+		iface_info->partial_hdr_hdl[i] =
+			hdr->hdr[i].hdr_hdl;
+
+	IPA_GSB_DBG("added partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("added partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(hdr);
+	return 0;
+}
+
+static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+
+	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4];
+	del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6];
+
+	if (ipa_del_hdr(del_hdr) != 0)
+		IPA_GSB_ERR("failed to delete partial hdr\n");
+
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v4]);
+	IPA_GSB_DBG("deleted partial hdr hdl for ipv6: %d\n",
+		iface_info->partial_hdr_hdl[IPA_IP_v6]);
+
+	kfree(del_hdr);
+}
+
+static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[0].hdr_name, sizeof(tx_prop[0].hdr_name),
+			 "%s_ipv4", iface_info->netdev_name);
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	tx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	snprintf(tx_prop[1].hdr_name, sizeof(tx_prop[1].hdr_name),
+			 "%s_ipv6", iface_info->netdev_name);
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
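+	/*
+	 * UL packets from all interfaces arrive on the same ODU_PROD pipe;
+	 * they are told apart by metadata: the pipe copies the start of the
+	 * mux header into the packet metadata (hdr_ofst_metadata = 0 in
+	 * ipa_gsb_connect_sys_pipe()) and the properties below match the
+	 * iface handle against it with a 0xFF mask.
+	 */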
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[0].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[0].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[0].attrib.meta_data_mask = 0xFF;
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[1].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_prop[1].attrib.meta_data = iface_info->iface_hdl;
+	rx_prop[1].attrib.meta_data_mask = 0xFF;
+
+	if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) {
+		IPA_GSB_ERR("fail to add interface prop\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info)
+{
+	if (ipa_deregister_intf(iface_info->netdev_name) != 0)
+		IPA_GSB_ERR("fail to dereg intf props\n");
+
+	IPA_GSB_DBG("deregistered iface props for %s\n",
+		iface_info->netdev_name);
+}
+
+static void ipa_gsb_pm_cb(void *user_data, enum ipa_pm_cb_event event)
+{
+	int i;
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_GSB_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_GSB_DBG_LOW("wake up clients\n");
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL)
+			ipa_gsb_ctx->iface[i]->wakeup_request(
+				ipa_gsb_ctx->iface[i]->priv);
+}
+
+static int ipa_gsb_register_pm(void)
+{
+	struct ipa_pm_register_params reg_params;
+	int ret;
+
+	memset(&reg_params, 0, sizeof(reg_params));
+	reg_params.name = "ipa_gsb";
+	reg_params.callback = ipa_gsb_pm_cb;
+	reg_params.user_data = NULL;
+	reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	ret = ipa_pm_register(&reg_params,
+		&ipa_gsb_ctx->pm_hdl);
+	if (ret) {
+		IPA_GSB_ERR("fail to register with PM %d\n", ret);
+		goto fail_pm_reg;
+	}
+	IPA_GSB_DBG("ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+
+	ret = ipa_pm_associate_ipa_cons_to_client(ipa_gsb_ctx->pm_hdl,
+		IPA_CLIENT_ODU_EMB_CONS);
+	if (ret) {
+		IPA_GSB_ERR("fail to associate cons with PM %d\n", ret);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+fail_pm_reg:
+	return ret;
+}
+
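+/*
+ * Typical call flow for a GSB client (illustrative sketch only; the
+ * my_* callbacks and my_ctx are hypothetical placeholders):
+ *
+ *	struct ipa_bridge_init_params params;
+ *	u32 hdl;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.info.netdev_name = netdev->name;
+ *	params.info.priv = my_ctx;
+ *	params.info.tx_dp_notify = my_tx_notify_cb;
+ *	params.info.send_dl_skb = my_send_dl_skb_cb;
+ *	params.wakeup_request = my_wakeup_cb;
+ *	ipa_bridge_init(&params, &hdl);
+ *	ipa_bridge_connect(hdl);
+ *	...UL data: ipa_bridge_tx_dp(hdl, skb, NULL);...
+ *	ipa_bridge_disconnect(hdl);
+ *	ipa_bridge_cleanup(hdl);
+ */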
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int i, ret;
+	struct ipa_gsb_iface_info *new_intf;
+
+	if (!params || !params->wakeup_request || !hdl ||
+		!params->info.netdev_name || !params->info.tx_dp_notify ||
+		!params->info.send_dl_skb) {
+		IPA_GSB_ERR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("netdev_name: %s\n", params->info.netdev_name);
+
+	if (ipa_gsb_ctx == NULL) {
+		ret = ipa_gsb_driver_init(&params->info);
+		if (ret) {
+			IPA_GSB_ERR("fail to init ipa gsb driver\n");
+			return -EFAULT;
+		}
+		ipa_gsb_ctx->ipa_sys_desc_size =
+			params->info.ipa_desc_size;
+		IPA_GSB_DBG("desc size: %d\n", ipa_gsb_ctx->ipa_sys_desc_size);
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+
+	if (params->info.ipa_desc_size != ipa_gsb_ctx->ipa_sys_desc_size) {
+		IPA_GSB_ERR("unmatch: orig desc size %d, new desc size %d\n",
+			ipa_gsb_ctx->ipa_sys_desc_size,
+			params->info.ipa_desc_size);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (ipa_gsb_ctx->iface[i] != NULL &&
+			strnlen(ipa_gsb_ctx->iface[i]->netdev_name,
+					IPA_RESOURCE_NAME_MAX) ==
+			strnlen(params->info.netdev_name,
+					IPA_RESOURCE_NAME_MAX) &&
+			strcmp(ipa_gsb_ctx->iface[i]->netdev_name,
+				params->info.netdev_name) == 0) {
+			IPA_GSB_ERR("intf was added before.\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			return -EFAULT;
+		}
+
+	if (ipa_gsb_ctx->num_iface == MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("reached maximum supported interfaces");
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+		if (!ipa_gsb_ctx->iface_hdl[i]) {
+			ipa_gsb_ctx->iface_hdl[i] = true;
+			*hdl = i;
+			IPA_GSB_DBG("iface hdl: %d\n", *hdl);
+			break;
+		}
+
+	IPA_GSB_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc_mem;
+	}
+
+	strlcpy(new_intf->netdev_name, params->info.netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->wakeup_request = params->wakeup_request;
+	new_intf->priv = params->info.priv;
+	new_intf->tx_dp_notify = params->info.tx_dp_notify;
+	new_intf->send_dl_skb = params->info.send_dl_skb;
+	new_intf->iface_hdl = *hdl;
+	memcpy(new_intf->device_ethaddr, params->info.device_ethaddr,
+		sizeof(new_intf->device_ethaddr));
+
+	if (ipa_gsb_commit_partial_hdr(new_intf) != 0) {
+		IPA_GSB_ERR("fail to commit partial hdrs\n");
+		ret = -EFAULT;
+		goto fail_partial_hdr;
+	}
+
+	if (ipa_gsb_reg_intf_props(new_intf) != 0) {
+		IPA_GSB_ERR("fail to register interface props\n");
+		ret = -EFAULT;
+		goto fail_reg_intf_props;
+	}
+
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ret = ipa_gsb_register_pm();
+		if (ret) {
+			IPA_GSB_ERR("fail to register with IPA PM %d\n", ret);
+			ret = -EFAULT;
+			goto fail_register_pm;
+		}
+	}
+
+	ipa_gsb_ctx->iface[*hdl] = new_intf;
+	ipa_gsb_ctx->num_iface++;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+
+fail_register_pm:
+	ipa_gsb_dereg_intf_props(new_intf);
+fail_reg_intf_props:
+	ipa_gsb_delete_partial_hdr(new_intf);
+fail_partial_hdr:
+	kfree(new_intf);
+fail_alloc_mem:
+	ipa_gsb_ctx->iface_hdl[*hdl] = false;
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+static void ipa_gsb_deregister_pm(void)
+{
+	IPA_GSB_DBG("deregister ipa pm hdl: %d\n", ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_gsb_ctx->pm_hdl);
+	ipa_gsb_ctx->pm_hdl = ~0;
+}
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	int i;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	if (!ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("cannot cleanup when iface is connected\n");
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+	ipa_gsb_dereg_intf_props(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_delete_partial_hdr(ipa_gsb_ctx->iface[hdl]);
+	spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]);
+	kfree(ipa_gsb_ctx->iface[hdl]);
+	ipa_gsb_ctx->iface[hdl] = NULL;
+	ipa_gsb_ctx->iface_hdl[hdl] = false;
+	spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]);
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	mutex_lock(&ipa_gsb_ctx->lock);
+	ipa_gsb_ctx->num_iface--;
+	IPA_GSB_DBG("num_iface %d\n", ipa_gsb_ctx->num_iface);
+	if (ipa_gsb_ctx->num_iface == 0) {
+		ipa_gsb_deregister_pm();
+		ipa_gsb_debugfs_destroy();
+		if (ipa_gsb_ctx->logbuf)
+			ipc_log_context_destroy(ipa_gsb_ctx->logbuf);
+		if (ipa_gsb_ctx->logbuf_low)
+			ipc_log_context_destroy(ipa_gsb_ctx->logbuf_low);
+		mutex_unlock(&ipa_gsb_ctx->lock);
+		mutex_destroy(&ipa_gsb_ctx->lock);
+		for (i = 0; i < MAX_SUPPORTED_IFACE; i++)
+			mutex_destroy(&ipa_gsb_ctx->iface_lock[i]);
+		kfree(ipa_gsb_ctx);
+		ipa_gsb_ctx = NULL;
+		return 0;
+	}
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
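+/*
+ * DL data path: one aggregated skb from the cons pipe may carry several
+ * frames, each laid out as
+ *
+ *	[4-byte mux hdr][dummy hdr][Ethernet hdr][payload][pad to 4 bytes]
+ *
+ * where pkt_size in the mux header counts the payload only. The loop in
+ * ipa_gsb_cons_cb() walks the aggregate and clones one skb per frame.
+ */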
+static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb;
+	struct sk_buff *skb2;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u16 pkt_size, pad_byte;
+	u8 hdl;
+
+	if (evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+
+	skb = (struct sk_buff *)data;
+
+	if (skb == NULL) {
+		IPA_GSB_ERR("unexpected NULL data\n");
+		WARN_ON(1);
+		return;
+	}
+
+	while (skb->len) {
+		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+		pkt_size = mux_hdr->pkt_size;
+		/*
+		 * Frames in the aggregate are padded to a 4-byte boundary
+		 * (hdr_pad_to_alignment = 2, i.e. 2^2 bytes, on the cons
+		 * pipe); compute the pad bytes that follow this frame.
+		 */
+		pad_byte = ((pkt_size + sizeof(*mux_hdr) + ETH_HLEN +
+			3 + IPA_GSB_SKB_DUMMY_HEADER) & ~3) -
+			(pkt_size + sizeof(*mux_hdr) +
+			ETH_HLEN + IPA_GSB_SKB_DUMMY_HEADER);
+		hdl = mux_hdr->iface_hdl;
+		if (hdl >= MAX_SUPPORTED_IFACE) {
+			IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+			break;
+		}
+		IPA_GSB_DBG_LOW("pkt_size: %d, pad_byte: %d, hdl: %d\n",
+			pkt_size, pad_byte, hdl);
+
+		/* remove the 4-byte mux header and the dummy header */
+		skb_pull(skb, sizeof(*mux_hdr) + IPA_GSB_SKB_DUMMY_HEADER);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (!skb2) {
+			IPA_GSB_ERR("skb_clone failed\n");
+			WARN_ON(1);
+			break;
+		}
+		skb_trim(skb2, pkt_size + ETH_HLEN);
+		spin_lock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]);
+		if (ipa_gsb_ctx->iface[hdl] != NULL) {
+			ipa_gsb_ctx->iface[hdl]->send_dl_skb(
+				ipa_gsb_ctx->iface[hdl]->priv, skb2);
+			ipa_gsb_ctx->iface[hdl]->iface_stats.num_dl_packets++;
+			spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]);
+			skb_pull(skb, pkt_size + ETH_HLEN + pad_byte);
+		} else {
+			IPA_GSB_ERR("Invalid hdl: %d, drop the skb\n", hdl);
+			spin_unlock_bh(&ipa_gsb_ctx->iface_spinlock[hdl]);
+			dev_kfree_skb_any(skb2);
+			break;
+		}
+	}
+
+	dev_kfree_skb_any(skb);
+}
+
+static void ipa_gsb_tx_dp_notify(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb;
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	u8 hdl;
+
+	skb = (struct sk_buff *)data;
+
+	if (skb == NULL) {
+		IPA_GSB_ERR("unexpected NULL data\n");
+		WARN_ON(1);
+		return;
+	}
+
+	if (evt != IPA_WRITE_DONE && evt != IPA_RECEIVE) {
+		IPA_GSB_ERR("unexpected event: %d\n", evt);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* fetch iface handle from header */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
+	/* change to host order */
+	*(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr);
+	hdl = mux_hdr->iface_hdl;
+	/* hdl is a u8 and can never be negative */
+	if (hdl >= MAX_SUPPORTED_IFACE || !ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("invalid hdl: %d and cb, drop the skb\n", hdl);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	IPA_GSB_DBG_LOW("evt: %d, hdl in tx_dp_notify: %d\n", evt, hdl);
+
+	/* remove 4 byte mux header */
+	skb_pull(skb, sizeof(struct ipa_gsb_mux_hdr));
+	ipa_gsb_ctx->iface[hdl]->tx_dp_notify(
+	   ipa_gsb_ctx->iface[hdl]->priv, evt,
+	   (unsigned long)skb);
+}
+
+static int ipa_gsb_connect_sys_pipe(void)
+{
+	struct ipa_sys_connect_params prod_params;
+	struct ipa_sys_connect_params cons_params;
+	int res;
+
+	memset(&prod_params, 0, sizeof(prod_params));
+	memset(&cons_params, 0, sizeof(cons_params));
+
+	/* configure RX EP */
+	prod_params.client = IPA_CLIENT_ODU_PROD;
+	prod_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr);
+	prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	prod_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+	prod_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	prod_params.priv = NULL;
+	prod_params.notify = ipa_gsb_tx_dp_notify;
+	res = ipa_setup_sys_pipe(&prod_params,
+		&ipa_gsb_ctx->prod_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup prod sys pipe %d\n", res);
+		goto fail_prod;
+	}
+
+	/* configure TX EP */
+	cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	cons_params.ipa_ep_cfg.hdr.hdr_len =
+		ETH_HLEN + sizeof(struct ipa_gsb_mux_hdr) +
+		IPA_GSB_SKB_DUMMY_HEADER;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	cons_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
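+	/* power-of-2 exponent: 2 -> pad to 2^2 = 4 byte alignment */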
+	cons_params.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
+	cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	/* setup aggregation */
+	cons_params.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	cons_params.ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+	cons_params.ipa_ep_cfg.aggr.aggr_time_limit =
+		IPA_GSB_AGGR_TIME_LIMIT;
+	cons_params.ipa_ep_cfg.aggr.aggr_byte_limit =
+		IPA_GSB_AGGR_BYTE_LIMIT;
+	cons_params.desc_fifo_sz = ipa_gsb_ctx->ipa_sys_desc_size;
+	cons_params.priv = NULL;
+	cons_params.notify = ipa_gsb_cons_cb;
+	res = ipa_setup_sys_pipe(&cons_params,
+		&ipa_gsb_ctx->cons_hdl);
+	if (res) {
+		IPA_GSB_ERR("fail to setup cons sys pipe %d\n", res);
+		goto fail_cons;
+	}
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	return 0;
+
+fail_cons:
+	ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	ipa_gsb_ctx->prod_hdl = 0;
+fail_prod:
+	return res;
+}
+
+int ipa_bridge_connect(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	if (!ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was already connected\n");
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+	if (ipa_gsb_ctx->num_connected_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+		ret = ipa_gsb_connect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to connect pipe\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+	}
+
+	/* connect = connect + resume */
+	ipa_gsb_ctx->iface[hdl]->is_connected = true;
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+
+	ipa_gsb_ctx->num_connected_iface++;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+static int ipa_gsb_disconnect_sys_pipe(void)
+{
+	int ret;
+
+	IPA_GSB_DBG("prod_hdl = %d, cons_hdl = %d\n",
+		ipa_gsb_ctx->prod_hdl, ipa_gsb_ctx->cons_hdl);
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->prod_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down prod pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->prod_hdl = 0;
+
+	ret = ipa_teardown_sys_pipe(ipa_gsb_ctx->cons_hdl);
+	if (ret) {
+		IPA_GSB_ERR("failed to tear down cons pipe\n");
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->cons_hdl = 0;
+
+	return 0;
+}
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+	int ret = 0;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 1);
+
+	if (!ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		ret = -EFAULT;
+		goto fail_no_lock;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_DBG("iface was not connected\n");
+		ret = 0;
+		goto fail_no_lock;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+	if (ipa_gsb_ctx->num_connected_iface == 1) {
+		ret = ipa_gsb_disconnect_sys_pipe();
+		if (ret) {
+			IPA_GSB_ERR("fail to discon pipes\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("failed to deactivate ipa pm\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+	}
+
+	/* disconnect = suspend + disconnect */
+	ipa_gsb_ctx->iface[hdl]->is_connected = false;
+	ipa_gsb_ctx->num_connected_iface--;
+	IPA_GSB_DBG("connected iface: %d\n",
+		ipa_gsb_ctx->num_connected_iface);
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+		ipa_gsb_ctx->num_resumed_iface--;
+		IPA_GSB_DBG("num resumed iface: %d\n",
+			ipa_gsb_ctx->num_resumed_iface);
+	}
+
+fail:
+	mutex_unlock(&ipa_gsb_ctx->lock);
+fail_no_lock:
+	/* the global lock is not held on the early error paths above */
+	atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 0);
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_resume(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	if (!ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	if (ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already resumed\n");
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+	if (ipa_gsb_ctx->num_resumed_iface == 0) {
+		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+
+		ret = ipa_start_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to start cons ep %d\n", ret);
+			/* undo the PM activation done above */
+			ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = true;
+	ipa_gsb_ctx->num_resumed_iface++;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	atomic_set(&ipa_gsb_ctx->suspend_in_progress, 1);
+	if (!ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
+		IPA_GSB_ERR("iface is not connected\n");
+		atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return -EFAULT;
+	}
+
+	if (!ipa_gsb_ctx->iface[hdl]->is_resumed) {
+		IPA_GSB_DBG_LOW("iface was already suspended\n");
+		atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+		return 0;
+	}
+
+	mutex_lock(&ipa_gsb_ctx->lock);
+	if (ipa_gsb_ctx->num_resumed_iface == 1) {
+		ret = ipa_stop_gsi_channel(
+			ipa_gsb_ctx->cons_hdl);
+		if (ret) {
+			IPA_GSB_ERR(
+				"fail to stop cons ep %d\n",
+				ret);
+			atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+
+		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
+		if (ret) {
+			IPA_GSB_ERR("fail to deactivate ipa pm\n");
+			ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl);
+			atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+			mutex_unlock(&ipa_gsb_ctx->lock);
+			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+			return ret;
+		}
+	}
+
+	ipa_gsb_ctx->iface[hdl]->is_resumed = false;
+	ipa_gsb_ctx->num_resumed_iface--;
+	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
+		ipa_gsb_ctx->num_resumed_iface);
+	atomic_set(&ipa_gsb_ctx->suspend_in_progress, 0);
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	int ret;
+
+	if (!ipa_gsb_ctx) {
+		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
+		return -EFAULT;
+	}
+
+	if (hdl >= MAX_SUPPORTED_IFACE) {
+		IPA_GSB_ERR("invalid hdl: %d\n", hdl);
+		return -EINVAL;
+	}
+
+	IPA_GSB_DBG("client hdl: %d, BW: %d\n", hdl, bandwidth);
+
+	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+
+	ret = ipa_pm_set_throughput(ipa_gsb_ctx->pm_hdl,
+		bandwidth);
+	if (ret)
+		IPA_GSB_ERR("fail to set perf profile\n");
+
+	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
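+/*
+ * UL data path: the bridge client hands a frame to ipa_bridge_tx_dp(),
+ * which prepends the 4-byte mux header (iface handle in byte 0, converted
+ * to network order) and queues the skb on the IPA_CLIENT_ODU_PROD pipe.
+ * Completions and exception packets come back through
+ * ipa_gsb_tx_dp_notify() above, which strips the mux header before calling
+ * the client's tx_dp_notify callback.
+ */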
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	struct ipa_gsb_mux_hdr *mux_hdr;
+	struct sk_buff *skb2;
+	int ret;
+
+	IPA_GSB_DBG_LOW("client hdl: %d\n", hdl);
+
+	if (hdl >= MAX_SUPPORTED_IFACE || !ipa_gsb_ctx->iface[hdl]) {
+		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
+		return -EFAULT;
+	}
+
+	if (unlikely(atomic_read(&ipa_gsb_ctx->disconnect_in_progress))) {
+		IPA_GSB_ERR("ipa bridge disconnect_in_progress\n");
+		return -EFAULT;
+	}
+
+	if (unlikely(atomic_read(&ipa_gsb_ctx->suspend_in_progress))) {
+		IPA_GSB_ERR("ipa bridge suspend_in_progress\n");
+		return -EFAULT;
+	}
+
+	if (unlikely(!ipa_gsb_ctx->iface[hdl]->is_resumed)) {
+		IPA_GSB_ERR("iface %d was suspended\n", hdl);
+		return -EFAULT;
+	}
+
+	/* make sure skb has enough headroom */
+	if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) {
+		IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n");
+		skb2 = skb_copy_expand(skb, sizeof(struct ipa_gsb_mux_hdr),
+			0, GFP_ATOMIC);
+		if (!skb2) {
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		ipa_gsb_ctx->iface[hdl]->
+			iface_stats.num_insufficient_headroom_packets++;
+	}
+
+	/* add 4 byte header for mux */
+	mux_hdr = (struct ipa_gsb_mux_hdr *)skb_push(skb,
+		sizeof(struct ipa_gsb_mux_hdr));
+	mux_hdr->iface_hdl = (u8)hdl;
+	/* change to network order */
+	*(u32 *)mux_hdr = htonl(*(u32 *)mux_hdr);
+
+	ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+	if (ret) {
+		IPA_GSB_ERR("tx dp failed %d\n", ret);
+		return -EFAULT;
+	}
+	ipa_gsb_ctx->iface[hdl]->iface_stats.num_ul_packets++;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ipa gsb driver");

+ 2507 - 0
ipa/ipa_clients/ipa_mhi_client.c

@@ -0,0 +1,2507 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi_client"
+
+#define IPA_MHI_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG("EXIT\n")
+
+#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
+
+#define IPA_MHI_SUSPEND_SLEEP_MIN 900
+#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 2
+
+/* bit #40 in the address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
+	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
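+
+/*
+ * Example, assuming IPA_MHI_HOST_ADDR() ORs in bit 40:
+ *	0x12345678 -> 0x010012345678
+ */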
+
+enum ipa_mhi_state {
+	IPA_MHI_STATE_INITIALIZED,
+	IPA_MHI_STATE_READY,
+	IPA_MHI_STATE_STARTED,
+	IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
+	IPA_MHI_STATE_SUSPENDED,
+	IPA_MHI_STATE_RESUME_IN_PROGRESS,
+	IPA_MHI_STATE_MAX
+};
+
+static char *ipa_mhi_state_str[] = {
+	__stringify(IPA_MHI_STATE_INITIALIZED),
+	__stringify(IPA_MHI_STATE_READY),
+	__stringify(IPA_MHI_STATE_STARTED),
+	__stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
+	__stringify(IPA_MHI_STATE_SUSPENDED),
+	__stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
+};
+
+#define MHI_STATE_STR(state) \
+	(((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
+		ipa_mhi_state_str[(state)] : \
+		"INVALID")
+
+enum ipa_mhi_dma_dir {
+	IPA_MHI_DMA_TO_HOST,
+	IPA_MHI_DMA_FROM_HOST,
+};
+
+/**
+ * struct ipa_mhi_channel_ctx - MHI Channel context
+ * @valid: entry is valid
+ * @id: MHI channel ID
+ * @index: channel handle for uC
+ * @client: IPA Client
+ * @state: Channel state
+ * @stop_in_proc: channel stop is in progress
+ * @ch_info: information about the GSI channel
+ * @channel_context_addr: address of the channel context in host memory
+ * @ch_ctx_host: cached copy of the channel context read from the host
+ * @event_context_addr: address of the event ring context in host memory
+ * @ev_ctx_host: cached copy of the event ring context read from the host
+ * @brstmode_enabled: is burst mode enabled for this channel
+ * @ch_scratch: the channel scratch configuration
+ * @cached_gsi_evt_ring_hdl: GSI event ring handle
+ */
+struct ipa_mhi_channel_ctx {
+	bool valid;
+	u8 id;
+	u8 index;
+	enum ipa_client_type client;
+	enum ipa_hw_mhi_channel_states state;
+	bool stop_in_proc;
+	struct gsi_chan_info ch_info;
+	u64 channel_context_addr;
+	struct ipa_mhi_ch_ctx ch_ctx_host;
+	u64 event_context_addr;
+	struct ipa_mhi_ev_ctx ev_ctx_host;
+	bool brstmode_enabled;
+	union __packed gsi_channel_scratch ch_scratch;
+	unsigned long cached_gsi_evt_ring_hdl;
+};
+
+struct ipa_mhi_client_ctx {
+	enum ipa_mhi_state state;
+	spinlock_t state_lock;
+	mhi_client_cb cb_notify;
+	void *cb_priv;
+	bool trigger_wakeup;
+	bool wakeup_notified;
+	struct workqueue_struct *wq;
+	struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
+	struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
+	u32 total_channels;
+	struct ipa_mhi_msi_info msi;
+	u32 mmio_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u64 channel_context_array_addr;
+	u64 event_context_array_addr;
+	u32 qmi_req_id;
+	u32 use_ipadma;
+	bool assert_bit40;
+	bool test_mode;
+	u32 pm_hdl;
+	u32 modem_pm_hdl;
+};
+
+static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
+static DEFINE_MUTEX(mhi_client_general_mutex);
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MHI_MAX_MSG_LEN 512
+static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
+static struct dentry *dent;
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Note: ipa_mhi_read_write_host() is used outside of the debugfs code
+ * (see ipa_mhi_read_ch_ctx()), so the helpers below are kept outside of
+ * the CONFIG_DEBUG_FS conditional.
+ */
+
+static char *ipa_mhi_channel_state_str[] = {
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
+};
+
+#define MHI_CH_STATE_STR(state) \
+	(((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
+	ipa_mhi_channel_state_str[(state)] : \
+	"INVALID")
+
+static int ipa_mhi_set_lock_unlock(bool is_lock)
+{
+	IPA_MHI_DBG("entry\n");
+	if (is_lock)
+		mutex_lock(&mhi_client_general_mutex);
+	else
+		mutex_unlock(&mhi_client_general_mutex);
+	IPA_MHI_DBG("exit\n");
+
+	return 0;
+}
+
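+/**
+ * ipa_mhi_read_write_host() - Copy a buffer between the device and the host
+ * @dir: copy direction (to host / from host)
+ * @dev_addr: device-side buffer
+ * @host_addr: host address in PCIe space
+ * @size: number of bytes to copy
+ *
+ * When use_ipadma is set, the copy goes through the IPA DMA engine via a
+ * DMA-coherent bounce buffer; otherwise the host address is mapped with
+ * ioremap() (or phys_to_virt() in test mode) and copied with memcpy().
+ */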
+static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
+	u64 host_addr, int size)
+{
+	struct ipa_mem_buffer mem;
+	int res;
+	struct device *pdev;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_mhi_client_ctx->use_ipadma) {
+		pdev = ipa_get_dma_dev();
+		host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);
+
+		mem.size = size;
+		mem.base = dma_alloc_coherent(pdev, mem.size,
+			&mem.phys_base, GFP_KERNEL);
+		if (!mem.base) {
+			IPA_MHI_ERR(
+				"dma_alloc_coherent failed, DMA buff size %d\n"
+					, mem.size);
+			return -ENOMEM;
+		}
+
+		res = ipa_dma_enable();
+		if (res) {
+			IPA_MHI_ERR("failed to enable IPA DMA rc=%d\n", res);
+			goto fail_dma_enable;
+		}
+
+		if (dir == IPA_MHI_DMA_FROM_HOST) {
+			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
+				size);
+			if (res) {
+				IPA_MHI_ERR(
+					"ipa_dma_sync_memcpy from host fail%d\n"
+					, res);
+				goto fail_memcopy;
+			}
+			memcpy(dev_addr, mem.base, size);
+		} else {
+			memcpy(mem.base, dev_addr, size);
+			res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
+				size);
+			if (res) {
+				IPA_MHI_ERR(
+					"ipa_dma_sync_memcpy to host fail %d\n"
+					, res);
+				goto fail_memcopy;
+			}
+		}
+		goto dma_succeed;
+	} else {
+		void *host_ptr;
+
+		if (!ipa_mhi_client_ctx->test_mode)
+			host_ptr = ioremap(host_addr, size);
+		else
+			host_ptr = phys_to_virt(host_addr);
+		if (!host_ptr) {
+			IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
+			return -EFAULT;
+		}
+		if (dir == IPA_MHI_DMA_FROM_HOST)
+			memcpy(dev_addr, host_ptr, size);
+		else
+			memcpy(host_ptr, dev_addr, size);
+		if (!ipa_mhi_client_ctx->test_mode)
+			iounmap(host_ptr);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+dma_succeed:
+	IPA_MHI_FUNC_EXIT();
+	res = 0;
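+	/* success path: fall through to the common DMA cleanup below */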
+fail_memcopy:
+	if (ipa_dma_disable())
+		IPA_MHI_ERR("failed to disable IPA DMA\n");
+fail_dma_enable:
+	dma_free_coherent(pdev, mem.size, mem.base, mem.phys_base);
+	return res;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
+	char *buff, int len)
+{
+	int nbytes = 0;
+
+	if (channel->valid) {
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"channel idx=%d ch_id=%d client=%d state=%s\n",
+			channel->index, channel->id, channel->client,
+			MHI_CH_STATE_STR(channel->state));
+
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"	ch_ctx=%llx\n",
+			channel->channel_context_addr);
+
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"	gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
+			channel->cached_gsi_evt_ring_hdl,
+			channel->event_context_addr);
+	}
+	return nbytes;
+}
+
+static int ipa_mhi_print_host_channel_ctx_info(
+		struct ipa_mhi_channel_ctx *channel, char *buff, int len)
+{
+	int res, nbytes = 0;
+	struct ipa_mhi_ch_ctx ch_ctx_host;
+
+	memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
+
+	/* reading ch context from host */
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&ch_ctx_host, channel->channel_context_addr,
+		sizeof(ch_ctx_host));
+	if (res) {
+		nbytes += scnprintf(&buff[nbytes], len - nbytes,
+			"Failed to read from host %d\n", res);
+		return nbytes;
+	}
+
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"ch_id: %d\n", channel->id);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"chstate: 0x%x\n", ch_ctx_host.chstate);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"brstmode: 0x%x\n", ch_ctx_host.brstmode);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"chtype: 0x%x\n", ch_ctx_host.chtype);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"erindex: 0x%x\n", ch_ctx_host.erindex);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rbase: 0x%llx\n", ch_ctx_host.rbase);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rlen: 0x%llx\n", ch_ctx_host.rlen);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rp: 0x%llx\n", ch_ctx_host.rp);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"wp: 0x%llx\n", ch_ctx_host.wp);
+
+	return nbytes;
+}
+
+static ssize_t ipa_mhi_debugfs_stats(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int nbytes = 0;
+	int i;
+	struct ipa_mhi_channel_ctx *channel;
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+		IPA_MHI_MAX_MSG_LEN - nbytes,
+		"IPA MHI state: %s\n",
+		MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		nbytes += ipa_mhi_print_channel_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		nbytes += ipa_mhi_print_channel_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int nbytes = 0;
+
+	nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int i, nbytes = 0;
+	struct ipa_mhi_channel_ctx *channel;
+
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
+	    ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+		IPA_MHI_MAX_MSG_LEN - nbytes,
+			"Cannot dump host channel context ");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_MHI_MAX_MSG_LEN - nbytes,
+				"before IPA MHI was STARTED\n");
+		return simple_read_from_buffer(ubuf, count, ppos,
+			dbg_buff, nbytes);
+	}
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"IPA MHI is suspended, cannot dump channel ctx array");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			" from host -PCIe can be in D3 state\n");
+		return simple_read_from_buffer(ubuf, count, ppos,
+			dbg_buff, nbytes);
+	}
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"channel contex array - dump from host\n");
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"***** UL channels *******\n");
+
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+			&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"\n***** DL channels *******\n");
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		if (!channel->valid)
+			continue;
+		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations ipa_mhi_stats_ops = {
+	.read = ipa_mhi_debugfs_stats,
+};
+
+static const struct file_operations ipa_mhi_uc_stats_ops = {
+	.read = ipa_mhi_debugfs_uc_stats,
+};
+
+static const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
+	.read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
+};
+
+
+static void ipa_mhi_debugfs_init(void)
+{
+	const mode_t read_only_mode = 0444;
+	const mode_t read_write_mode = 0664;
+	struct dentry *file;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	dent = debugfs_create_dir("ipa_mhi", NULL);
+	if (IS_ERR(dent)) {
+		IPA_MHI_ERR("fail to create folder ipa_mhi\n");
+		return;
+	}
+
+	file = debugfs_create_file("stats", read_only_mode, dent,
+		NULL, &ipa_mhi_stats_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("uc_stats", read_only_mode, dent,
+		NULL, &ipa_mhi_uc_stats_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file uc_stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
+		&ipa_mhi_client_ctx->use_ipadma);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file use_ipadma\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("dump_host_channel_ctx_array",
+		read_only_mode, dent, NULL, &ipa_mhi_dump_host_ch_ctx_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
+		goto fail;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#else
+static void ipa_mhi_debugfs_init(void) {}
+static void ipa_mhi_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;
+
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
+
+static void ipa_mhi_wq_notify_ready(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
+
+/**
+ * ipa_mhi_notify_wakeup() - Schedule work to notify data available
+ *
+ * This function schedules a work item to notify about a data available
+ * event. If it is called more than once, only one notification will be
+ * sent to the MHI client driver. No further notifications are sent until
+ * the IPA MHI state becomes STARTED.
+ */
+static void ipa_mhi_notify_wakeup(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_mhi_client_ctx->wakeup_notified) {
+		IPA_MHI_DBG("wakeup already called\n");
+		return;
+	}
+	queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
+	ipa_mhi_client_ctx->wakeup_notified = true;
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on data available event.
+ */
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+		IPA_MHI_EVENT_DATA_AVAILABLE, 0);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on ready event when IPA uC is loaded
+ */
+static void ipa_mhi_wq_notify_ready(struct work_struct *work)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+		IPA_MHI_EVENT_READY, 0);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_notify_ready() - Schedule work to notify ready
+ *
+ * This function will schedule a work to notify ready event.
+ */
+static void ipa_mhi_notify_ready(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_set_state() - Set new state to IPA MHI
+ * @state: new state
+ *
+ * Sets a new IPA MHI state if the transition is allowed by the IPA MHI
+ * state machine. Some state transitions trigger a wakeup request.
+ *
+ * Returns: 0 on success, -EPERM otherwise
+ */
+static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
+{
+	unsigned long flags;
+	int res = -EPERM;
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_DBG("Current state: %s\n",
+			MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+	switch (ipa_mhi_client_ctx->state) {
+	case IPA_MHI_STATE_INITIALIZED:
+		if (new_state == IPA_MHI_STATE_READY) {
+			ipa_mhi_notify_ready();
+			res = 0;
+		}
+		break;
+
+	case IPA_MHI_STATE_READY:
+		if (new_state == IPA_MHI_STATE_READY)
+			res = 0;
+		if (new_state == IPA_MHI_STATE_STARTED)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_STARTED:
+		if (new_state == IPA_MHI_STATE_INITIALIZED)
+			res = 0;
+		else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
+		if (new_state == IPA_MHI_STATE_SUSPENDED) {
+			if (ipa_mhi_client_ctx->trigger_wakeup) {
+				ipa_mhi_client_ctx->trigger_wakeup = false;
+				ipa_mhi_notify_wakeup();
+			}
+			res = 0;
+		} else if (new_state == IPA_MHI_STATE_STARTED) {
+			ipa_mhi_client_ctx->wakeup_notified = false;
+			ipa_mhi_client_ctx->trigger_wakeup = false;
+			res = 0;
+		}
+		break;
+
+	case IPA_MHI_STATE_SUSPENDED:
+		if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_RESUME_IN_PROGRESS:
+		if (new_state == IPA_MHI_STATE_SUSPENDED) {
+			if (ipa_mhi_client_ctx->trigger_wakeup) {
+				ipa_mhi_client_ctx->trigger_wakeup = false;
+				ipa_mhi_notify_wakeup();
+			}
+			res = 0;
+		} else if (new_state == IPA_MHI_STATE_STARTED) {
+			ipa_mhi_client_ctx->trigger_wakeup = false;
+			ipa_mhi_client_ctx->wakeup_notified = false;
+			res = 0;
+		}
+		break;
+
+	default:
+		IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
+		WARN_ON(1);
+	}
+
+	if (res)
+		IPA_MHI_ERR("Invalid state change to %s\n",
+						MHI_STATE_STR(new_state));
+	else {
+		IPA_MHI_DBG("New state change to %s\n",
+						MHI_STATE_STR(new_state));
+		ipa_mhi_client_ctx->state = new_state;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	return res;
+}
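+
+/*
+ * Transitions accepted by ipa_mhi_set_state() above:
+ *
+ *	INITIALIZED -> READY -> STARTED
+ *	STARTED -> SUSPEND_IN_PROGRESS -> SUSPENDED (or back to STARTED)
+ *	SUSPENDED -> RESUME_IN_PROGRESS -> STARTED (or back to SUSPENDED)
+ *	STARTED -> INITIALIZED (on MHI reset)
+ */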
+
+static void ipa_mhi_uc_ready_cb(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_set_state(IPA_MHI_STATE_READY);
+	IPA_MHI_FUNC_EXIT();
+}
+
+static void ipa_mhi_uc_wakeup_request_cb(void)
+{
+	unsigned long flags;
+
+	IPA_MHI_FUNC_ENTRY();
+	IPA_MHI_DBG("MHI state: %s\n",
+			MHI_STATE_STR(ipa_mhi_client_ctx->state));
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
+		ipa_mhi_notify_wakeup();
+	else if (ipa_mhi_client_ctx->state ==
+			IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+		/* wakeup event will be triggered after suspend finishes */
+		ipa_mhi_client_ctx->trigger_wakeup = true;
+
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_start() - Start IPA MHI engine
+ * @params: pcie addresses for MHI
+ *
+ * This function is called by the MHI client driver on MHI engine start in
+ * order to handle MHI accelerated channels. It must be called after
+ * ipa_mhi_init() and may be called again after an MHI reset to restart the
+ * MHI engine. When this function returns, the device can move to M0 state.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+	int res;
+	struct ipa_mhi_init_engine init_params;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
+		return res;
+	}
+
+	ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
+	ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
+	ipa_mhi_client_ctx->channel_context_array_addr =
+		params->channel_context_array_addr;
+	ipa_mhi_client_ctx->event_context_array_addr =
+		params->event_context_array_addr;
+	IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
+			ipa_mhi_client_ctx->host_ctrl_addr);
+	IPA_MHI_DBG("host_data_addr 0x%x\n",
+			ipa_mhi_client_ctx->host_data_addr);
+	IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
+		ipa_mhi_client_ctx->channel_context_array_addr);
+	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
+		ipa_mhi_client_ctx->event_context_array_addr);
+
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed activate client %d\n", res);
+		goto fail_pm_activate;
+	}
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed activate modem client %d\n", res);
+		goto fail_pm_activate_modem;
+	}
+
+	/* gsi params */
+	init_params.gsi.first_ch_idx =
+			ipa_mhi_client_ctx->first_ch_idx;
+	/* uC params */
+	init_params.uC.first_ch_idx =
+			ipa_mhi_client_ctx->first_ch_idx;
+	init_params.uC.first_er_idx =
+			ipa_mhi_client_ctx->first_er_idx;
+	init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
+	init_params.uC.host_data_addr = params->host_data_addr;
+	init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
+	init_params.uC.msi = &ipa_mhi_client_ctx->msi;
+	init_params.uC.ipa_cached_dl_ul_sync_info =
+			&ipa_cached_dl_ul_sync_info;
+
+	res = ipa_mhi_init_engine(&init_params);
+	if (res) {
+		IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_pm_activate_modem:
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_pm_activate:
+	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
+	return res;
+}
+
+/**
+ * ipa_mhi_get_channel_context() - Get corresponding channel context
+ * @ep: IPA ep
+ * @channel_id: Channel ID
+ *
+ * This function will return the corresponding channel context, or allocate
+ * a new one if no context exists yet for the channel.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
+	enum ipa_client_type client, u8 channel_id)
+{
+	int ch_idx;
+	struct ipa_mhi_channel_ctx *channels;
+	int max_channels;
+
+	if (IPA_CLIENT_IS_PROD(client)) {
+		channels = ipa_mhi_client_ctx->ul_channels;
+		max_channels = IPA_MHI_MAX_UL_CHANNELS;
+	} else {
+		channels = ipa_mhi_client_ctx->dl_channels;
+		max_channels = IPA_MHI_MAX_DL_CHANNELS;
+	}
+
+	/* find the channel context according to channel id */
+	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+		if (channels[ch_idx].valid &&
+		    channels[ch_idx].id == channel_id)
+			return &channels[ch_idx];
+	}
+
+	/* channel context does not exist, allocate a new one */
+	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+		if (!channels[ch_idx].valid)
+			break;
+	}
+
+	if (ch_idx == max_channels) {
+		IPA_MHI_ERR("no more channels available\n");
+		return NULL;
+	}
+
+	channels[ch_idx].valid = true;
+	channels[ch_idx].id = channel_id;
+	channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
+	channels[ch_idx].client = client;
+	channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
+
+	return &channels[ch_idx];
+}
+
+/**
+ * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
+ * context
+ * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
+ *
+ * This function will return the corresponding channel context or NULL in case
+ * that channel does not exist.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
+	u32 clnt_hdl)
+{
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
+		if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
+		ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->ul_channels[ch_idx].client)
+				== clnt_hdl)
+			return &ipa_mhi_client_ctx->ul_channels[ch_idx];
+	}
+
+	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
+		if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
+		ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->dl_channels[ch_idx].client)
+				== clnt_hdl)
+			return &ipa_mhi_client_ctx->dl_channels[ch_idx];
+	}
+
+	return NULL;
+}
+
+static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_DBG("ch_id %d\n", channel->id);
+	IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
+	IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
+	IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
+	IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
+	IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
+	IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
+	IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
+	IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
+	IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
+}
+
+static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
+		channel->ch_ctx_host.erindex);
+
+	IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
+	IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
+	IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
+	IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
+	IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
+	IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
+	IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
+	IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
+}
+
+static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&channel->ch_ctx_host, channel->channel_context_addr,
+		sizeof(channel->ch_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+	}
+	ipa_mhi_dump_ch_ctx(channel);
+
+	channel->event_context_addr =
+		ipa_mhi_client_ctx->event_context_array_addr +
+		channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
+	IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
+		channel->event_context_addr);
+
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&channel->ev_ctx_host, channel->event_context_addr,
+		sizeof(channel->ev_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+	}
+	ipa_mhi_dump_ev_ctx(channel);
+
+	return 0;
+}
+
+static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
+{
+	struct ipa_mhi_channel_ctx *channel = notify->user_data;
+
+	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+		channel->id, channel->client, channel->state);
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+	ipa_assert();
+}
+
+static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
+{
+	struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
+
+	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+		channel->id, channel->client, channel->state);
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+	ipa_assert();
+}
+
+static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!channel->stop_in_proc) {
+		IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
+		return true;
+	}
+
+	if (ipa_mhi_stop_gsi_channel(channel->client)) {
+		channel->stop_in_proc = false;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
+ * @msecs: timeout to wait
+ *
+ * This function will poll until there are no pending packets in the uplink
+ * channels, or until the timeout expires.
+ *
+ * Return code: true - no pending packets in uplink channels
+ *		false - timeout occurred
+ */
+static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
+{
+	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
+	unsigned long jiffies_start = jiffies;
+	bool empty = false;
+	int i;
+
+	IPA_MHI_FUNC_ENTRY();
+	while (!empty) {
+		empty = true;
+		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+			if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+				continue;
+			if (ipa_get_transport_type() ==
+			    IPA_TRANSPORT_TYPE_GSI)
+				empty &= ipa_mhi_gsi_channel_empty(
+					&ipa_mhi_client_ctx->ul_channels[i]);
+			else
+				empty &= ipa_mhi_sps_channel_empty(
+				ipa_mhi_client_ctx->ul_channels[i].client);
+		}
+
+		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPA_MHI_DBG("finished waiting for UL empty\n");
+			break;
+		}
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
+		    IPA_MHI_MAX_UL_CHANNELS == 1)
+			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
+
+	IPA_MHI_FUNC_EXIT();
+	return empty;
+}
+
+static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
+{
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+	int i;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	req.source_pipe_bitmask = 0;
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+			continue;
+		req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
+				ipa_mhi_client_ctx->ul_channels[i].client);
+	}
+	if (throttle_source) {
+		req.throttle_source_valid = 1;
+		req.throttle_source = 1;
+	}
+	IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
+		req.request_id, req.source_pipe_bitmask,
+		req.throttle_source);
+	res = ipa_qmi_enable_force_clear_datapath_send(&req);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_qmi_enable_force_clear_datapath_send failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_disable_force_clear(u32 request_id)
+{
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
+	res = ipa_qmi_disable_force_clear_datapath_send(&req);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_qmi_disable_force_clear_datapath_send failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_set_holb_on_dl_channels(bool enable,
+	struct ipa_ep_cfg_holb old_holb[])
+{
+	int i;
+	struct ipa_ep_cfg_holb ep_holb;
+	int ep_idx;
+	int res;
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		if (!ipa_mhi_client_ctx->dl_channels[i].valid)
+			continue;
+		if (ipa_mhi_client_ctx->dl_channels[i].state ==
+			IPA_HW_MHI_CHANNEL_STATE_INVALID)
+			continue;
+		ep_idx = ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->dl_channels[i].client);
+		if (ep_idx == -1) {
+			IPA_MHI_ERR("Client %u is not mapped\n",
+				ipa_mhi_client_ctx->dl_channels[i].client);
+			ipa_assert();
+			return;
+		}
+		memset(&ep_holb, 0, sizeof(ep_holb));
+		if (enable) {
+			ipa_get_holb(ep_idx, &old_holb[i]);
+			ep_holb.en = 1;
+			ep_holb.tmr_val = 0;
+		} else {
+			ep_holb = old_holb[i];
+		}
+		res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
+		if (res) {
+			IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
+			ipa_assert();
+			return;
+		}
+	}
+}
+
+static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int clnt_hdl;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	clnt_hdl = ipa_get_ep_mapping(channel->client);
+	if (clnt_hdl < 0)
+		return -EFAULT;
+
+	res = ipa_stop_gsi_channel(clnt_hdl);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+	    res != -GSI_STATUS_TIMED_OUT) {
+		IPA_MHI_ERR("GSI stop channel failed %d\n", res);
+		return -EFAULT;
+	}
+
+	/* check if channel was stopped completely */
+	if (res)
+		channel->stop_in_proc = true;
+
+	IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
+		"STOP_IN_PROC" : "STOP");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+	bool empty;
+	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
+
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_suspend_gsi_channel(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
+				 res);
+			return res;
+		}
+	} else {
+		res = ipa_uc_mhi_reset_channel(channel->index);
+		if (res) {
+			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+				res);
+			return res;
+		}
+	}
+
+	empty = ipa_mhi_wait_for_ul_empty_timeout(
+			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
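+	/*
+	 * If packets are still pending, ask the modem via QMI to force-clear
+	 * the datapath on the UL pipes so the channel can drain.
+	 */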
+	if (!empty) {
+		IPA_MHI_DBG("%s not empty\n",
+			(ipa_get_transport_type() ==
+				IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
+		res = ipa_mhi_enable_force_clear(
+				ipa_mhi_client_ctx->qmi_req_id, false);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
+				res);
+			ipa_assert();
+			return res;
+		}
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+			empty = ipa_mhi_wait_for_ul_empty_timeout(
+				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+			IPA_MHI_DBG("empty=%d\n", empty);
+		} else {
+			/* enable packet drop on all DL channels */
+			ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
+			ipa_generate_tag_process();
+			/* disable packet drop on all DL channels */
+			ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);
+
+			res = ipa_disable_sps_pipe(channel->client);
+			if (res) {
+				IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+				ipa_assert();
+				return res;
+			}
+		}
+
+		res =
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
+				res);
+			ipa_assert();
+			return res;
+		}
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+
+	res = ipa_mhi_reset_channel_internal(channel->client);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_suspend_gsi_channel(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
+					, res);
+			return res;
+		}
+
+		res = ipa_mhi_reset_channel_internal(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_reset_channel_internal failed %d\n",
+				res);
+			return res;
+		}
+	} else {
+		res = ipa_mhi_reset_channel_internal(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_reset_channel_internal failed %d\n",
+				res);
+			return res;
+		}
+
+		res = ipa_uc_mhi_reset_channel(channel->index);
+		if (res) {
+			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+				res);
+			ipa_mhi_start_channel_internal(channel->client);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel,
+				 bool update_state)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (IPA_CLIENT_IS_PROD(channel->client))
+		res = ipa_mhi_reset_ul_channel(channel);
+	else
+		res = ipa_mhi_reset_dl_channel(channel);
+	if (res) {
+		IPA_MHI_ERR("failed to reset channel error %d\n", res);
+		return res;
+	}
+
+	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+
+	if ((ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) &&
+		update_state) {
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+				sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+{
+	int res;
+	unsigned long flags;
+	struct ipa_mhi_channel_ctx *channel = NULL;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	if (in->sys.client >= IPA_CLIENT_MAX) {
+		IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
+		IPA_MHI_ERR(
+			"Invalid MHI client, client: %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	IPA_MHI_DBG("channel=%d\n", in->channel_id);
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("IPA MHI was not initialized\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
+		IPA_MHI_ERR("IPA MHI was not started\n");
+		spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+	channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
+	if (!channel) {
+		IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
+		return -EINVAL;
+	}
+
+	if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
+	    channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+		IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
+		return -EFAULT;
+	}
+
+	channel->channel_context_addr =
+		ipa_mhi_client_ctx->channel_context_array_addr +
+			channel->id * sizeof(struct ipa_mhi_ch_ctx);
+
+	/* the event context address index needs to be read from the host */
+
+	IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
+		channel->client, channel->index, channel->id, channel->state);
+	IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
+		channel->channel_context_addr,
+		channel->cached_gsi_evt_ring_hdl);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	mutex_lock(&mhi_client_general_mutex);
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		struct ipa_mhi_connect_params_internal internal;
+
+		IPA_MHI_DBG("reading ch/ev context from host\n");
+		res = ipa_mhi_read_ch_ctx(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+			goto fail_start_channel;
+		}
+
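+		/*
+		 * Populate the GSI connect parameters from the channel and
+		 * event contexts just read from the host.
+		 */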
+		internal.channel_id = in->channel_id;
+		internal.sys = &in->sys;
+		internal.start.gsi.state = channel->state;
+		internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
+		internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
+		internal.start.gsi.event_context_addr =
+				channel->event_context_addr;
+		internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
+		internal.start.gsi.channel_context_addr =
+				channel->channel_context_addr;
+		internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
+		internal.start.gsi.channel = (void *)channel;
+		internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
+		internal.start.gsi.assert_bit40 =
+				ipa_mhi_client_ctx->assert_bit40;
+		internal.start.gsi.mhi = &channel->ch_scratch.mhi;
+		internal.start.gsi.cached_gsi_evt_ring_hdl =
+				&channel->cached_gsi_evt_ring_hdl;
+		internal.start.gsi.evchid = channel->index;
+
+		res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+		if (res) {
+			IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+			goto fail_connect_pipe;
+		}
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+		channel->brstmode_enabled =
+				channel->ch_scratch.mhi.burst_mode_enabled;
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+				sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			mutex_unlock(&mhi_client_general_mutex);
+			IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+			return res;
+
+		}
+	} else {
+		struct ipa_mhi_connect_params_internal internal;
+
+		internal.channel_id = in->channel_id;
+		internal.sys = &in->sys;
+		internal.start.uC.index = channel->index;
+		internal.start.uC.id = channel->id;
+		internal.start.uC.state = channel->state;
+		res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+		if (res) {
+			IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+			goto fail_connect_pipe;
+		}
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	}
+	mutex_unlock(&mhi_client_general_mutex);
+
+	if (!in->sys.keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+fail_connect_pipe:
+	mutex_unlock(&mhi_client_general_mutex);
+	ipa_mhi_reset_channel(channel, true);
+fail_start_channel:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+	return -EPERM;
+}
+
+/**
+ * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+	int res;
+	enum ipa_client_type client;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("IPA MHI was not initialized\n");
+		return -EINVAL;
+	}
+
+	client = ipa_get_client_mapping(clnt_hdl);
+
+	if (!IPA_CLIENT_IS_MHI(client)) {
+		IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
+		return -EINVAL;
+	}
+
+	channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
+	if (!channel) {
+		IPA_MHI_ERR("invalid clnt index\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
+
+	res = ipa_mhi_reset_channel(channel, false);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
+		goto fail_reset_channel;
+	}
+
+	mutex_lock(&mhi_client_general_mutex);
+	res = ipa_disconnect_mhi_pipe(clnt_hdl);
+	if (res) {
+		IPA_MHI_ERR(
+			"IPA core driver failed to disconnect the pipe hdl %d, res %d\n",
+			clnt_hdl, res);
+		goto fail_disconnect_pipe;
+	}
+	mutex_unlock(&mhi_client_general_mutex);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+fail_disconnect_pipe:
+	mutex_unlock(&mhi_client_general_mutex);
+fail_reset_channel:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+	return res;
+}
+
+static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
+	int max_channels)
+{
+	int i;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < max_channels; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_RUN)
+			continue;
+		IPA_MHI_DBG("suspending channel %d\n",
+			channels[i].id);
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+			res = ipa_mhi_suspend_gsi_channel(
+				&channels[i]);
+		else
+			res = ipa_uc_mhi_suspend_channel(
+				channels[i].index);
+
+		if (res) {
+			IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+				i, res);
+			return res;
+		}
+		channels[i].state =
+			IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_stop_event_update_channels(
+		struct ipa_mhi_channel_ctx *channels, int max_channels)
+{
+	int i;
+	int res;
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		return 0;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < max_channels; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+			continue;
+		IPA_MHI_DBG("stop update event channel %d\n",
+			channels[i].id);
+		res = ipa_uc_mhi_stop_event_update_channel(
+			channels[i].index);
+		if (res) {
+			IPA_MHI_ERR("failed stop event channel %d error %d\n",
+				i, res);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa_mhi_check_pending_packets_from_host(void)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+
+		res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+		if (res) {
+			IPA_MHI_ERR("gsi_query_channel_info failed\n");
+			return true;
+		}
+		res = ipa_mhi_read_ch_ctx(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+			return true;
+		}
+
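+		/*
+		 * The channel is empty only when the device read pointer has
+		 * caught up with the host's write pointer.
+		 */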
+		if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
+			IPA_MHI_DBG("There are pending packets from host\n");
+			IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
+				channel->ch_info.rp, channel->ch_ctx_host.wp);
+
+			return true;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return false;
+}
+
+static int ipa_mhi_resume_channels(bool LPTransitionRejected,
+		struct ipa_mhi_channel_ctx *channels, int max_channels)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < max_channels; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+			continue;
+		channel = &channels[i];
+		IPA_MHI_DBG("resuming channel %d\n", channel->id);
+
+		res = ipa_mhi_resume_channels_internal(channel->client,
+			LPTransitionRejected, channel->brstmode_enabled,
+			channel->ch_scratch, channel->index);
+
+		if (res) {
+			IPA_MHI_ERR("failed to resume channel %d error %d\n",
+				i, res);
+			return res;
+		}
+
+		channel->stop_in_proc = false;
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa_mhi_suspend_ul() - Suspend MHI accelerated uplink channels
+ * @force:
+ *	false: in case of data pending in IPA, MHI channels will not be
+ *		suspended and function will fail.
+ *	true:  in case of data pending in IPA, make sure no further access from
+ *		IPA to PCIe is possible. In this case suspend cannot fail.
+ * @empty: [out] true if the uplink channels drained within the timeout
+ * @force_clear: [out] set to true if force-clear datapath was enabled
+ *	during the suspend
+ *
+ * This function is called from ipa_mhi_suspend() to suspend the uplink
+ * channels.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
+{
+	int res;
+
+	*force_clear = false;
+
+	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
+		goto fail_suspend_ul_channel;
+	}
+
+	*empty = ipa_mhi_wait_for_ul_empty_timeout(
+			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+	if (!*empty) {
+		if (force) {
+			res = ipa_mhi_enable_force_clear(
+				ipa_mhi_client_ctx->qmi_req_id, false);
+			if (res) {
+				IPA_MHI_ERR("failed to enable force clear\n");
+				ipa_assert();
+				return res;
+			}
+			*force_clear = true;
+			IPA_MHI_DBG("force clear datapath enabled\n");
+
+			*empty = ipa_mhi_wait_for_ul_empty_timeout(
+				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+			IPA_MHI_DBG("empty=%d\n", *empty);
+			if (!*empty && ipa_get_transport_type()
+				== IPA_TRANSPORT_TYPE_GSI) {
+				IPA_MHI_ERR("Failed to suspend UL channels\n");
+				if (ipa_mhi_client_ctx->test_mode) {
+					res = -EAGAIN;
+					goto fail_suspend_ul_channel;
+				}
+
+				ipa_assert();
+			}
+		} else {
+			IPA_MHI_DBG("IPA not empty\n");
+			res = -EAGAIN;
+			goto fail_suspend_ul_channel;
+		}
+	}
+
+	if (*force_clear) {
+		res =
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+		if (res) {
+			IPA_MHI_ERR("failed to disable force clear\n");
+			ipa_assert();
+			return res;
+		}
+		IPA_MHI_DBG("force clear datapath disabled\n");
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+
+	if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		if (ipa_mhi_check_pending_packets_from_host()) {
+			res = -EAGAIN;
+			goto fail_suspend_ul_channel;
+		}
+	}
+
+	res = ipa_mhi_stop_event_update_channels(
+		ipa_mhi_client_ctx->ul_channels, IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_mhi_stop_event_update_ul_channels failed %d\n",
+			res);
+		goto fail_suspend_ul_channel;
+	}
+
+	return 0;
+
+fail_suspend_ul_channel:
+	return res;
+}
+
+static bool ipa_mhi_has_open_aggr_frame(void)
+{
+	struct ipa_mhi_channel_ctx *channel;
+	int i;
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+
+		if (!channel->valid)
+			continue;
+
+		if (ipa_has_open_aggr_frame(channel->client))
+			return true;
+	}
+
+	return false;
+}
+
+static void ipa_mhi_update_host_ch_state(bool update_rp)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
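+	/*
+	 * Mirror the local channel state (and, when update_rp is set, the
+	 * current channel read pointer) into the host's MHI channel context
+	 * via IPA DMA, for all valid UL and DL channels.
+	 */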
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+
+		if (update_rp) {
+			res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+			if (res) {
+				IPA_MHI_ERR("gsi_query_channel_info failed\n");
+				ipa_assert();
+				return;
+			}
+
+			res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+				&channel->ch_info.rp,
+				channel->channel_context_addr +
+					offsetof(struct ipa_mhi_ch_ctx, rp),
+				sizeof(channel->ch_info.rp));
+			if (res) {
+				IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+				ipa_assert();
+				return;
+			}
+		}
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+			sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			ipa_assert();
+			return;
+		}
+		IPA_MHI_DBG("Updated UL CH=%d state to %s on host\n",
+			i, MHI_CH_STATE_STR(channel->state));
+	}
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		if (!channel->valid)
+			continue;
+
+		if (update_rp) {
+			res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+			if (res) {
+				IPA_MHI_ERR("gsi_query_channel_info failed\n");
+				ipa_assert();
+				return;
+			}
+
+			res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+				&channel->ch_info.rp,
+				channel->channel_context_addr +
+					offsetof(struct ipa_mhi_ch_ctx, rp),
+				sizeof(channel->ch_info.rp));
+			if (res) {
+				IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+				ipa_assert();
+				return;
+			}
+		}
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+			offsetof(struct ipa_mhi_ch_ctx, chstate),
+			sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			ipa_assert();
+			return;
+		}
+		IPA_MHI_DBG("Updated DL CH=%d state to %s on host\n",
+			i, MHI_CH_STATE_STR(channel->state));
+	}
+}
+
+static int ipa_mhi_suspend_dl(bool force)
+{
+	int res;
+
+	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_mhi_suspend_channels for dl failed %d\n", res);
+		goto fail_suspend_dl_channel;
+	}
+
+	res = ipa_mhi_stop_event_update_channels
+			(ipa_mhi_client_ctx->dl_channels,
+			IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
+		goto fail_stop_event_update_dl_channel;
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
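+		/*
+		 * An open aggregation frame means the DL path still holds
+		 * data. On a forced suspend, schedule a wakeup so the host
+		 * resumes us; otherwise fail the suspend with -EAGAIN.
+		 */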
+		if (ipa_mhi_has_open_aggr_frame()) {
+			IPA_MHI_DBG("There is an open aggr frame\n");
+			if (force) {
+				ipa_mhi_client_ctx->trigger_wakeup = true;
+			} else {
+				res = -EAGAIN;
+				goto fail_stop_event_update_dl_channel;
+			}
+		}
+	}
+
+	return 0;
+
+fail_stop_event_update_dl_channel:
+	ipa_mhi_resume_channels(true,
+			ipa_mhi_client_ctx->dl_channels,
+			IPA_MHI_MAX_DL_CHANNELS);
+fail_suspend_dl_channel:
+	return res;
+}
+
+/**
+ * ipa_mhi_suspend() - Suspend MHI accelerated channels
+ * @force:
+ *	false: in case of data pending in IPA, MHI channels will not be
+ *		suspended and function will fail.
+ *	true:  in case of data pending in IPA, make sure no further access from
+ *		IPA to PCIe is possible. In this case suspend cannot fail.
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_suspend(bool force)
+{
+	int res;
+	bool empty;
+	bool force_clear;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		return res;
+	}
+
+	res = ipa_mhi_suspend_dl(force);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
+		goto fail_suspend_dl_channel;
+	}
+
+	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
+
+	res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
+		goto fail_suspend_ul_channel;
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(true);
+
+	/*
+	 * Hold the IPA clocks and release them only after all IPA PM clients
+	 * are deactivated, to make sure the tag process will not start.
+	 */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to deactivate client %d\n", res);
+		goto fail_deactivate_pm;
+	}
+	res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to deactivate client %d\n", res);
+		goto fail_deactivate_modem_pm;
+	}
+	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
+
+	if (!empty)
+		ipa_set_tag_process_before_gating(false);
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		goto fail_release_cons;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_release_cons:
+	ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_deactivate_modem_pm:
+	ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_deactivate_pm:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail_suspend_ul_channel:
+	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (force_clear) {
+		if (
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
+			IPA_MHI_ERR("failed to disable force clear\n");
+			ipa_assert();
+		}
+		IPA_MHI_DBG("force clear datapath disabled\n");
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+fail_suspend_dl_channel:
+	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	return res;
+}
+
+/**
+ * ipa_mhi_resume() - Resume MHI accelerated channels
+ *
+ * This function is called by MHI client driver on MHI resume.
+ * This function is called after MHI channel was suspended.
+ * When this function returns device can move to M0 state.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to resume corresponding MHI channel
+ *	- Activate PM clients
+ *	- Resume data to IPA
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_resume(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		return res;
+	}
+
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to activate client %d\n", res);
+		goto fail_pm_activate;
+	}
+
+	res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to activate client %d\n", res);
+		goto fail_pm_activate_modem;
+	}
+
+	/* resume all UL channels */
+	res = ipa_mhi_resume_channels(false,
+					ipa_mhi_client_ctx->ul_channels,
+					IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
+		goto fail_resume_ul_channels;
+	}
+
+	res = ipa_mhi_resume_channels(false,
+				ipa_mhi_client_ctx->dl_channels,
+				IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+			res);
+		goto fail_resume_dl_channels;
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(false);
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		goto fail_set_state;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_set_state:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+fail_resume_dl_channels:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+fail_resume_ul_channels:
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_pm_activate_modem:
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_pm_activate:
+	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+	return res;
+}
+
+static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
+	int num_of_channels)
+{
+	struct ipa_mhi_channel_ctx *channel;
+	int i, res;
+	u32 clnt_hdl;
+
+	for (i = 0; i < num_of_channels; i++) {
+		channel = &channels[i];
+		if (!channel->valid)
+			continue;
+		if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
+			continue;
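+		/*
+		 * A channel that is not yet in DISABLE state is still
+		 * connected and must be disconnected before its GSI
+		 * resources are destroyed.
+		 */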
+		if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+			clnt_hdl = ipa_get_ep_mapping(channel->client);
+			IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
+			res = ipa_mhi_disconnect_pipe(clnt_hdl);
+			if (res) {
+				IPA_MHI_ERR(
+					"failed to disconnect pipe %d, err %d\n"
+					, clnt_hdl, res);
+				goto fail;
+			}
+		}
+		res = ipa_mhi_destroy_channel(channel->client);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_destroy_channel failed %d\n",
+				res);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	return res;
+}
+
+/**
+ * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
+ *
+ * This function is called by IPA MHI client driver on MHI reset to destroy all
+ * IPA MHI channels.
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All UL channels are disconnected\n");
+
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All DL channels are disconnected\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_mhi_deregister_pm(void)
+{
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
+	ipa_mhi_client_ctx->pm_hdl = ~0;
+
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_mhi_client_ctx->modem_pm_hdl = ~0;
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */
+void ipa_mhi_destroy(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
+		return;
+	}
+
+	ipa_deregister_client_callback(IPA_CLIENT_MHI_PROD);
+
+	/* reset all UL and DL acc channels and their associated event rings */
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_destroy_all_channels();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
+				res);
+			goto fail;
+		}
+	}
+	IPA_MHI_DBG("All channels are disconnected\n");
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
+		IPA_MHI_DBG("cleanup uC MHI\n");
+		ipa_uc_mhi_cleanup();
+	}
+
+	ipa_mhi_deregister_pm();
+	ipa_dma_destroy();
+	ipa_mhi_debugfs_destroy();
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return;
+fail:
+	ipa_assert();
+}
+
+static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	unsigned long flags;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_MHI_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+		ipa_mhi_notify_wakeup();
+	} else if (ipa_mhi_client_ctx->state ==
+		IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
+		/* wakeup event will be triggered after suspend finishes */
+		ipa_mhi_client_ctx->trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_DBG("EXIT");
+}
+
+static int ipa_mhi_register_pm(void)
+{
+	int res;
+	struct ipa_pm_register_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.name = "MHI";
+	params.callback = ipa_mhi_pm_cb;
+	params.group = IPA_PM_GROUP_DEFAULT;
+	res = ipa_pm_register(&params, &ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to register with PM %d\n", res);
+		return res;
+	}
+
+	res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
+		IPA_CLIENT_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("fail to associate cons with PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
+	res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000);
+	if (res) {
+		IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
+	/* create a modem client for clock scaling */
+	memset(&params, 0, sizeof(params));
+	params.name = "MODEM (MHI)";
+	params.group = IPA_PM_GROUP_MODEM;
+	params.skip_clk_vote = true;
+	res = ipa_pm_register(&params, &ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to register with PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
+	ipa_mhi_client_ctx->pm_hdl = ~0;
+	return res;
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * Driver. When this function returns device can move to READY state.
+ * This function is doing the following:
+ *	- Initialize MHI IPA internal data structures
+ *	- Register with PM
+ *	- Initialize debugfs
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (!params->notify) {
+		IPA_MHI_ERR("null notify function\n");
+		return -EINVAL;
+	}
+
+	if (ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("already initialized\n");
+		return -EPERM;
+	}
+
+	IPA_MHI_DBG("notify = %pS priv = %pK\n", params->notify, params->priv);
+	IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
+		params->msi.addr_low, params->msi.addr_hi);
+	IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
+		params->msi.data, params->msi.mask);
+	IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
+	IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
+	IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
+	IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
+	IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
+
+	/* Initialize context */
+	ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
+	if (!ipa_mhi_client_ctx) {
+		res = -ENOMEM;
+		goto fail_alloc_ctx;
+	}
+
+	ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
+	ipa_mhi_client_ctx->cb_notify = params->notify;
+	ipa_mhi_client_ctx->cb_priv = params->priv;
+	spin_lock_init(&ipa_mhi_client_ctx->state_lock);
+	ipa_mhi_client_ctx->msi = params->msi;
+	ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
+	ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
+	ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
+	ipa_mhi_client_ctx->qmi_req_id = 0;
+	ipa_mhi_client_ctx->use_ipadma = true;
+	ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
+	ipa_mhi_client_ctx->test_mode = params->test_mode;
+
+	ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
+	if (!ipa_mhi_client_ctx->wq) {
+		IPA_MHI_ERR("failed to create workqueue\n");
+		res = -EFAULT;
+		goto fail_create_wq;
+	}
+
+	res = ipa_dma_init();
+	if (res) {
+		IPA_MHI_ERR("failed to init ipa dma %d\n", res);
+		goto fail_dma_init;
+	}
+
+	res = ipa_mhi_register_pm();
+	if (res) {
+		IPA_MHI_ERR("failed to create PM resources\n");
+		res = -EFAULT;
+		goto fail_pm;
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		ipa_mhi_set_state(IPA_MHI_STATE_READY);
+	} else {
+		/* Initialize uC interface */
+		ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+			ipa_mhi_uc_wakeup_request_cb);
+		if (ipa_uc_state_check() == 0)
+			ipa_mhi_set_state(IPA_MHI_STATE_READY);
+	}
+
+	ipa_register_client_callback(&ipa_mhi_set_lock_unlock, NULL,
+					IPA_CLIENT_MHI_PROD);
+
+	/* Initialize debugfs */
+	ipa_mhi_debugfs_init();
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_pm:
+	ipa_dma_destroy();
+fail_dma_init:
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+	return res;
+}
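+
+/*
+ * Illustrative call order for an MHI client driver (a sketch based on the
+ * kernel-doc in this file, not a verbatim sequence from any client; error
+ * handling and the connect parameters are omitted, and pipes can only be
+ * connected once the driver has reached STARTED state):
+ *
+ *	struct ipa_mhi_init_params init = { .notify = my_cb, ... };
+ *	u32 hdl;
+ *
+ *	ipa_mhi_init(&init);			// boot: driver moves to READY
+ *	ipa_mhi_connect_pipe(&conn, &hdl);	// on each MHI channel start
+ *	ipa_mhi_suspend(false);			// on MHI suspend
+ *	ipa_mhi_resume();			// on MHI resume
+ *	ipa_mhi_disconnect_pipe(hdl);		// on MHI channel reset
+ *	ipa_mhi_destroy();			// on MHI reset
+ */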
+
+static void ipa_mhi_cache_dl_ul_sync_info(
+	struct ipa_config_req_msg_v01 *config_req)
+{
+	ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+	ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+		(config_req->ul_accumulation_time_limit_valid) ?
+		config_req->ul_accumulation_time_limit : 0;
+	ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+		(config_req->ul_msi_event_threshold_valid) ?
+		config_req->ul_msi_event_threshold : 0;
+	ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+		(config_req->dl_msi_event_threshold_valid) ?
+		config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ * @config_req: IPA CONFIG QMI message received from the modem
+ *
+ * This function is called by the IPA QMI service to indicate that an IPA
+ * CONFIG message was sent from the modem. IPA MHI will forward this
+ * information to the IPA uC, or will cache it until IPA MHI is initialized.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+		ipa_mhi_cache_dl_ul_sync_info(config_req);
+		if (ipa_mhi_client_ctx &&
+				ipa_mhi_client_ctx->state !=
+						IPA_MHI_STATE_INITIALIZED)
+			ipa_uc_mhi_send_dl_ul_sync_info(
+				&ipa_cached_dl_ul_sync_info);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_handle_ipa_config_req);
+
+int ipa_mhi_is_using_dma(bool *flag)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	*flag = ipa_mhi_client_ctx->use_ipadma;
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+	return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");

+ 635 - 0
ipa/ipa_clients/ipa_uc_offload.c

@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+	do { \
+		pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+enum ipa_uc_offload_state {
+	IPA_UC_OFFLOAD_STATE_INVALID,
+	IPA_UC_OFFLOAD_STATE_INITIALIZED,
+	IPA_UC_OFFLOAD_STATE_UP,
+};
+
+struct ipa_uc_offload_ctx {
+	enum ipa_uc_offload_proto proto;
+	enum ipa_uc_offload_state state;
+	void *priv;
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	ipa_notify_cb notify;
+	struct completion ntn_completion;
+	u32 pm_hdl;
+	struct ipa_ntn_conn_in_params conn;
+};
+
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
+static int ipa_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_hdr_info *hdr_info)
+{
+	int i;
+
+	if (hdr == NULL || hdr_info == NULL) {
+		IPA_UC_OFFLOAD_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", netdev_name);
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	/* suspend/resume is not supported */
+	IPA_UC_OFFLOAD_DBG("event = %d\n", event);
+}
+
+static int ipa_uc_offload_ntn_register_pm_client(
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int res;
+	struct ipa_pm_register_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.name = "ETH";
+	params.callback = ipa_uc_offload_ntn_pm_cb;
+	params.user_data = ntn_ctx;
+	params.group = IPA_PM_GROUP_DEFAULT;
+	res = ipa_pm_register(&params, &ntn_ctx->pm_hdl);
+	if (res) {
+		IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
+		return res;
+	}
+
+	res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
+		IPA_CLIENT_ETHERNET_CONS);
+	if (res) {
+		IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
+		ipa_pm_deregister(ntn_ctx->pm_hdl);
+		ntn_ctx->pm_hdl = ~0;
+		return res;
+	}
+
+	return 0;
+}
+
+static void ipa_uc_offload_ntn_deregister_pm_client(
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
+	ipa_pm_deregister(ntn_ctx->pm_hdl);
+}
+
+static int ipa_uc_offload_ntn_reg_intf(
+	struct ipa_uc_offload_intf_params *inp,
+	struct ipa_uc_offload_out_params *outp,
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	struct ipa_ioc_add_hdr *hdr = NULL;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	int ret = 0;
+	u32 len;
+
+	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+					 inp->netdev_name);
+	ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to register PM client\n");
+		return -EFAULT;
+	}
+	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+	ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+	ntn_ctx->notify = inp->notify;
+	ntn_ctx->priv = inp->priv;
+
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+		IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
+	tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+	memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+		sizeof(tx_prop[0].hdr_name));
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
+	tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+	memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+		sizeof(tx_prop[1].hdr_name));
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
+	rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+	if (inp->is_meta_data_valid) {
+		rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[0].attrib.meta_data = inp->meta_data;
+		rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+	}
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
+	rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+	if (inp->is_meta_data_valid) {
+		rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[1].attrib.meta_data = inp->meta_data;
+		rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+	}
+
+	if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+		IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+		/*
+		 * Do not clear the context here: the error path still needs
+		 * pm_hdl to deregister the PM client.
+		 */
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	init_completion(&ntn_ctx->ntn_completion);
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+	kfree(hdr);
+	return ret;
+
+fail:
+	kfree(hdr);
+fail_alloc:
+	ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
+	return ret;
+}
+
+int ipa_uc_offload_reg_intf(
+	struct ipa_uc_offload_intf_params *inp,
+	struct ipa_uc_offload_out_params *outp)
+{
+	struct ipa_uc_offload_ctx *ctx;
+	int ret = 0;
+
+	if (inp == NULL || outp == NULL) {
+		IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n",
+			inp, outp);
+		return -EINVAL;
+	}
+
+	if (inp->proto <= IPA_UC_INVALID ||
+		inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+		return -EINVAL;
+	}
+
+	if (!ipa_uc_offload_ctx[inp->proto]) {
+		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		if (ctx == NULL) {
+			IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+			return -EFAULT;
+		}
+		ipa_uc_offload_ctx[inp->proto] = ctx;
+		ctx->proto = inp->proto;
+	} else {
+		ctx = ipa_uc_offload_ctx[inp->proto];
+	}
+
+	if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+		IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+		return -EINVAL;
+	}
+
+	if (ctx->proto == IPA_UC_NTN) {
+		ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+		if (!ret)
+			outp->clnt_hndl = IPA_UC_NTN;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
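+
+/*
+ * Illustrative NTN interface registration (a sketch; values are
+ * hypothetical and error handling is omitted):
+ *
+ *	struct ipa_uc_offload_intf_params in = {
+ *		.proto = IPA_UC_NTN,
+ *		.netdev_name = "eth0",
+ *		// hdr_info, notify, priv and meta data set by the client
+ *	};
+ *	struct ipa_uc_offload_out_params out;
+ *
+ *	if (!ipa_uc_offload_reg_intf(&in, &out))
+ *		; // out.clnt_hndl is used for conn/disconn/cleanup calls
+ */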
+
+static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
+	struct ipa_ntn_setup_info *source)
+{
+	int result;
+
+	IPA_UC_OFFLOAD_DBG("Allocating smmu info\n");
+
+	memcpy(dest, source, sizeof(struct ipa_ntn_setup_info));
+
+	dest->data_buff_list =
+		kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map),
+			GFP_KERNEL);
+	if (dest->data_buff_list == NULL) {
+		IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n");
+		return -ENOMEM;
+	}
+
+	memcpy(dest->data_buff_list, source->data_buff_list,
+		sizeof(struct ntn_buff_smmu_map) * dest->num_buffers);
+
+	result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt,
+		source->buff_pool_base_sgt);
+	if (result) {
+		kfree(dest->data_buff_list);
+		return result;
+	}
+
+	result = ipa_smmu_store_sgt(&dest->ring_base_sgt,
+		source->ring_base_sgt);
+	if (result) {
+		kfree(dest->data_buff_list);
+		ipa_smmu_free_sgt(&dest->buff_pool_base_sgt);
+		return result;
+	}
+
+	return 0;
+}
+
+static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params)
+{
+	kfree(params->data_buff_list);
+	ipa_smmu_free_sgt(&params->buff_pool_base_sgt);
+	ipa_smmu_free_sgt(&params->ring_base_sgt);
+}
+
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+			struct ipa_ntn_conn_out_params *outp,
+			struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int result = 0;
+	enum ipa_uc_offload_state prev_state;
+
+	if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
+		IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
+		return -EINVAL;
+	}
+
+	prev_state = ntn_ctx->state;
+	if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+		inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+		IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+		return -EINVAL;
+	}
+	if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+		inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+		IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+		return -EINVAL;
+	}
+
+	result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
+		return result;
+	}
+
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+	result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+		ntn_ctx->priv, ntn_ctx->hdr_len, outp);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
+				result);
+		ntn_ctx->state = prev_state;
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ntn_ctx->conn.dl.smmu_enabled) {
+		result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl,
+			&inp->dl);
+		if (result) {
+			IPA_UC_OFFLOAD_ERR("alloc failure on TX\n");
+			goto fail;
+		}
+		result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul,
+			&inp->ul);
+		if (result) {
+			ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
+			IPA_UC_OFFLOAD_ERR("alloc failure on RX\n");
+			goto fail;
+		}
+	}
+
+fail:
+	return result;
+}
+
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+			struct ipa_uc_offload_conn_out_params *outp)
+{
+	int ret = 0;
+	struct ipa_uc_offload_ctx *offload_ctx;
+
+	if (!(inp && outp)) {
+		IPA_UC_OFFLOAD_ERR("bad parm. in=%pK out=%pK\n", inp, outp);
+		return -EINVAL;
+	}
+
+	if (inp->clnt_hndl <= IPA_UC_INVALID ||
+		inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+						   inp->clnt_hndl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+		IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
+		return -EPERM;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
+						offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+	int ret = 0;
+
+	if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
+		IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
+		return -EINVAL;
+	}
+
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+	ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
+			ret);
+		return -EFAULT;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
+	ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl,
+		&ntn_ctx->conn);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
+			ret);
+		return -EFAULT;
+	}
+	if (ntn_ctx->conn.dl.smmu_enabled) {
+		ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
+		ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul);
+	}
+
+	return ret;
+}
+
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+	struct ipa_uc_offload_ctx *offload_ctx;
+	int ret = 0;
+
+	if (clnt_hdl <= IPA_UC_INVALID ||
+		clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+		IPA_UC_OFFLOAD_ERR("Invalid state\n");
+		return -EINVAL;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int len, result = 0;
+	struct ipa_ioc_del_hdr *hdr;
+
+	ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
+
+	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL)
+		return -ENOMEM;
+
+	hdr->commit = 1;
+	hdr->num_hdls = 2;
+	hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+	hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+	if (ipa_del_hdr(hdr)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+fail:
+	kfree(hdr);
+	return result;
+}
+
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+	struct ipa_uc_offload_ctx *offload_ctx;
+	int ret = 0;
+
+	if (clnt_hdl <= IPA_UC_INVALID ||
+		clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+		IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+		return -EINVAL;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_cleanup(offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret) {
+		kfree(offload_ctx);
+		offload_ctx = NULL;
+		ipa_uc_offload_ctx[clnt_hdl] = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
+
+/**
+ * ipa_uc_offload_reg_rdyCB() - register a uC ready CB if the uC is not
+ * ready yet
+ * @inp:	[in/out] input/output parameters from/to the client
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
+{
+	int ret = 0;
+
+	if (!inp) {
+		IPA_UC_OFFLOAD_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	if (inp->proto == IPA_UC_NTN)
+		ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
+
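+	/*
+	 * -EEXIST from the register call indicates the uC is already up;
+	 * report ready immediately instead of waiting for the callback.
+	 */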
+	if (ret == -EEXIST) {
+		inp->is_uC_ready = true;
+		ret = 0;
+	} else {
+		inp->is_uC_ready = false;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
+
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+	if (proto == IPA_UC_NTN)
+		ipa_ntn_uc_dereg_rdyCB();
+}
+EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);

+ 2638 - 0
ipa/ipa_clients/ipa_usb.c

@@ -0,0 +1,2638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/rndis_ipa.h>
+#include <linux/ecm_ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
+
+#define IPA_HOLB_TMR_EN 0x1
+
+/* GSI channels weights */
+#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5
+#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4
+
+#define IPA_USB_MAX_MSG_LEN 4096
+
+#define IPA_USB_DRV_NAME "ipa_usb"
+
+#define IPA_USB_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_INFO(fmt, args...) \
+	do { \
+		pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+enum ipa_usb_direction {
+	IPA_USB_DIR_UL,
+	IPA_USB_DIR_DL,
+};
+
+struct ipa_usb_xdci_connect_params_internal {
+	enum ipa_usb_max_usb_packet_size max_pkt_size;
+	u32 ipa_to_usb_clnt_hdl;
+	u8 ipa_to_usb_xferrscidx;
+	bool ipa_to_usb_xferrscidx_valid;
+	u32 usb_to_ipa_clnt_hdl;
+	u8 usb_to_ipa_xferrscidx;
+	bool usb_to_ipa_xferrscidx_valid;
+	enum ipa_usb_teth_prot teth_prot;
+	struct ipa_usb_teth_prot_params teth_prot_params;
+	u32 max_supported_bandwidth_mbps;
+};
+
+enum ipa3_usb_teth_prot_state {
+	IPA_USB_TETH_PROT_INITIALIZED,
+	IPA_USB_TETH_PROT_CONNECTED,
+	IPA_USB_TETH_PROT_INVALID
+};
+
+struct ipa3_usb_teth_prot_context {
+	union {
+		struct ipa_usb_init_params rndis;
+		struct ecm_ipa_params ecm;
+		struct teth_bridge_init_params teth_bridge;
+	} teth_prot_params;
+	enum ipa3_usb_teth_prot_state state;
+	void *user_data;
+};
+
+struct ipa3_usb_pm_context {
+	struct ipa_pm_register_params reg_params;
+	struct work_struct *remote_wakeup_work;
+	u32 hdl;
+};
+
+enum ipa3_usb_state {
+	IPA_USB_INVALID,
+	IPA_USB_INITIALIZED,
+	IPA_USB_CONNECTED,
+	IPA_USB_STOPPED,
+	IPA_USB_SUSPEND_REQUESTED,
+	IPA_USB_SUSPENDED,
+	IPA_USB_SUSPENDED_NO_RWAKEUP,
+	IPA_USB_RESUME_IN_PROGRESS
+};
+
+enum ipa3_usb_transport_type {
+	IPA_USB_TRANSPORT_TETH,
+	IPA_USB_TRANSPORT_DPL,
+	IPA_USB_TRANSPORT_MAX
+};
+
+/* Get transport type from tethering protocol */
+#define IPA3_USB_GET_TTYPE(__teth_prot) \
+	(((__teth_prot) == IPA_USB_DIAG) ? \
+	IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH)
+
+/* Is the given transport type DPL? */
+#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
+	((__ttype) == IPA_USB_TRANSPORT_DPL)
+
+struct ipa3_usb_teth_prot_conn_params {
+	u32 usb_to_ipa_clnt_hdl;
+	u32 ipa_to_usb_clnt_hdl;
+	struct ipa_usb_teth_prot_params params;
+};
+
+/**
+ * Transport type - either data tethering or DPL.
+ * Each transport has its own PM resources and statuses.
+ */
+struct ipa3_usb_transport_type_ctx {
+	struct ipa3_usb_pm_context pm_ctx;
+	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
+	void *user_data;
+	enum ipa3_usb_state state;
+	bool rwakeup_pending;
+	struct ipa_usb_xdci_chan_params ul_ch_params;
+	struct ipa_usb_xdci_chan_params dl_ch_params;
+	struct ipa3_usb_teth_prot_conn_params teth_conn_params;
+};
+
+struct ipa3_usb_smmu_reg_map {
+	int cnt;
+	phys_addr_t addr;
+};
+
+/*
+ * Tethering protocol switch table, relevant for IPA4.5 on sdx55v1 and
+ * Kona. Indexed as [previous protocol][requested protocol]; a true
+ * entry means the switch is allowed (see
+ * ipa3_usb_is_teth_switch_valid()).
+ */
+static const bool teth_type_switch_tbl_ipa45
+	[IPA_USB_MAX_TETH_PROT_SIZE][IPA_USB_MAX_TETH_PROT_SIZE] = {
+		[IPA_USB_RNDIS] = {true, false, true, false, false},
+		[IPA_USB_ECM] = {false, true, false, false, false},
+		[IPA_USB_RMNET] = {true, false, true, false, false},
+		[IPA_USB_MBIM] = {true, true, true, true, false},
+		[IPA_USB_DIAG] = {false, false, false, false, true},
+	};
+
+struct ipa3_usb_teth_type_switch {
+	bool valid;
+	enum ipa_usb_teth_prot teth;
+};
+
+struct ipa3_usb_context {
+	struct ipa3_usb_teth_prot_context
+		teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE];
+	int num_init_prot; /* without dpl */
+	struct teth_bridge_init_params teth_bridge_params;
+	struct completion dev_ready_comp;
+	u32 qmi_req_id;
+	spinlock_t state_lock;
+	bool dl_data_pending;
+	struct workqueue_struct *wq;
+	struct mutex general_mutex;
+	struct ipa3_usb_transport_type_ctx
+		ttype_ctx[IPA_USB_TRANSPORT_MAX];
+	struct dentry *dfile_state_info;
+	struct dentry *dent;
+	struct ipa3_usb_smmu_reg_map smmu_reg_map;
+	struct ipa3_usb_teth_type_switch prev_teth;
+};
+
+enum ipa3_usb_op {
+	IPA_USB_OP_INIT_TETH_PROT,
+	IPA_USB_OP_REQUEST_CHANNEL,
+	IPA_USB_OP_CONNECT,
+	IPA_USB_OP_DISCONNECT,
+	IPA_USB_OP_RELEASE_CHANNEL,
+	IPA_USB_OP_DEINIT_TETH_PROT,
+	IPA_USB_OP_SUSPEND,
+	IPA_USB_OP_SUSPEND_NO_RWAKEUP,
+	IPA_USB_OP_RESUME
+};
+
+struct ipa3_usb_status_dbg_info {
+	const char *teth_state;
+	const char *dpl_state;
+	int num_init_prot;
+	const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
+	const char *teth_connected_prot;
+	const char *dpl_connected_prot;
+};
+
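+/*
+ * Remote wakeup is notified to the USB driver from workqueue context,
+ * with one dedicated work item per transport type (tethering data and
+ * DPL), queued from the IPA PM wakeup callback.
+ */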
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
+static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
+	ipa3_usb_wq_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
+	ipa3_usb_wq_dpl_notify_remote_wakeup);
+
+static struct ipa3_usb_context *ipa3_usb_ctx;
+
+static char *ipa3_usb_op_to_string(enum ipa3_usb_op op)
+{
+	switch (op) {
+	case IPA_USB_OP_INIT_TETH_PROT:
+		return "IPA_USB_OP_INIT_TETH_PROT";
+	case IPA_USB_OP_REQUEST_CHANNEL:
+		return "IPA_USB_OP_REQUEST_CHANNEL";
+	case IPA_USB_OP_CONNECT:
+		return "IPA_USB_OP_CONNECT";
+	case IPA_USB_OP_DISCONNECT:
+		return "IPA_USB_OP_DISCONNECT";
+	case IPA_USB_OP_RELEASE_CHANNEL:
+		return "IPA_USB_OP_RELEASE_CHANNEL";
+	case IPA_USB_OP_DEINIT_TETH_PROT:
+		return "IPA_USB_OP_DEINIT_TETH_PROT";
+	case IPA_USB_OP_SUSPEND:
+		return "IPA_USB_OP_SUSPEND";
+	case IPA_USB_OP_SUSPEND_NO_RWAKEUP:
+		return "IPA_USB_OP_SUSPEND_NO_RWAKEUP";
+	case IPA_USB_OP_RESUME:
+		return "IPA_USB_OP_RESUME";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
+{
+	switch (state) {
+	case IPA_USB_INVALID:
+		return "IPA_USB_INVALID";
+	case IPA_USB_INITIALIZED:
+		return "IPA_USB_INITIALIZED";
+	case IPA_USB_CONNECTED:
+		return "IPA_USB_CONNECTED";
+	case IPA_USB_STOPPED:
+		return "IPA_USB_STOPPED";
+	case IPA_USB_SUSPEND_REQUESTED:
+		return "IPA_USB_SUSPEND_REQUESTED";
+	case IPA_USB_SUSPENDED:
+		return "IPA_USB_SUSPENDED";
+	case IPA_USB_SUSPENDED_NO_RWAKEUP:
+		return "IPA_USB_SUSPENDED_NO_RWAKEUP";
+	case IPA_USB_RESUME_IN_PROGRESS:
+		return "IPA_USB_RESUME_IN_PROGRESS";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event)
+{
+	switch (event) {
+	case IPA_USB_DEVICE_READY:
+		return "IPA_USB_DEVICE_READY";
+	case IPA_USB_REMOTE_WAKEUP:
+		return "IPA_USB_REMOTE_WAKEUP";
+	case IPA_USB_SUSPEND_COMPLETED:
+		return "IPA_USB_SUSPEND_COMPLETED";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static bool ipa3_usb_get_teth_port_state(void)
+{
+	if (ipa3_usb_ctx == NULL)
+		return false;
+
+	return ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state ==
+		IPA_USB_CONNECTED;
+}
+
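+/*
+ * Validate and apply a state transition for the given transport type.
+ * The legality of the transition is checked under state_lock; returns
+ * true if the transition is legal (and was therefore applied).
+ */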
+static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
+	enum ipa3_usb_transport_type ttype)
+{
+	unsigned long flags;
+	bool state_legal = false;
+	enum ipa3_usb_state state;
+	bool rwakeup_pending;
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	rwakeup_pending = ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending;
+	switch (new_state) {
+	case IPA_USB_INVALID:
+		if (state == IPA_USB_INITIALIZED)
+			state_legal = true;
+		break;
+	case IPA_USB_INITIALIZED:
+		if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID ||
+			((!IPA3_USB_IS_TTYPE_DPL(ttype)) &&
+			(state == IPA_USB_INITIALIZED)))
+			state_legal = true;
+		break;
+	case IPA_USB_CONNECTED:
+		if (state == IPA_USB_INITIALIZED ||
+			state == IPA_USB_STOPPED ||
+			state == IPA_USB_RESUME_IN_PROGRESS ||
+			state == IPA_USB_SUSPENDED_NO_RWAKEUP ||
+			/*
+			 * In case of failure during suspend request
+			 * handling, state is reverted to connected.
+			 */
+			(err_permit && state == IPA_USB_SUSPEND_REQUESTED))
+			state_legal = true;
+		break;
+	case IPA_USB_STOPPED:
+		if (state == IPA_USB_CONNECTED ||
+			state == IPA_USB_SUSPENDED ||
+			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
+			state_legal = true;
+		break;
+	case IPA_USB_SUSPEND_REQUESTED:
+		if (state == IPA_USB_CONNECTED)
+			state_legal = true;
+		break;
+	case IPA_USB_SUSPENDED:
+		if (state == IPA_USB_SUSPEND_REQUESTED ||
+			/*
+			 * In case of failure during resume, state is reverted
+			 * to original, which could be suspended. Allow it
+			 */
+			(err_permit && state == IPA_USB_RESUME_IN_PROGRESS)) {
+			state_legal = true;
+			rwakeup_pending = false;
+		}
+		break;
+	case IPA_USB_SUSPENDED_NO_RWAKEUP:
+		if (state == IPA_USB_CONNECTED)
+			state_legal = true;
+		break;
+	case IPA_USB_RESUME_IN_PROGRESS:
+		if (state == IPA_USB_SUSPENDED)
+			state_legal = true;
+		break;
+	default:
+		state_legal = false;
+		break;
+	}
+	if (state_legal) {
+		if (state != new_state) {
+			IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n",
+				IPA3_USB_IS_TTYPE_DPL(ttype) ? "DPL" : "",
+				ipa3_usb_state_to_string(state),
+				ipa3_usb_state_to_string(new_state));
+			ipa3_usb_ctx->ttype_ctx[ttype].state = new_state;
+			ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending =
+				rwakeup_pending;
+		}
+	} else {
+		IPA_USB_ERR("invalid state change %s -> %s\n",
+			ipa3_usb_state_to_string(state),
+			ipa3_usb_state_to_string(new_state));
+	}
+
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	return state_legal;
+}
+
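+/*
+ * Check whether the given operation is legal in the current state of
+ * the given transport type; evaluated under state_lock.
+ */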
+static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
+	enum ipa3_usb_transport_type ttype)
+{
+	unsigned long flags;
+	bool is_legal = false;
+	enum ipa3_usb_state state;
+	bool is_dpl;
+
+	if (ipa3_usb_ctx == NULL) {
+		IPA_USB_ERR("ipa_usb_ctx is not initialized!\n");
+		return false;
+	}
+
+	is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype);
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	switch (op) {
+	case IPA_USB_OP_INIT_TETH_PROT:
+		if (state == IPA_USB_INVALID ||
+			(!is_dpl && state == IPA_USB_INITIALIZED))
+			is_legal = true;
+		break;
+	case IPA_USB_OP_REQUEST_CHANNEL:
+		if (state == IPA_USB_INITIALIZED)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_CONNECT:
+		if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_DISCONNECT:
+		if (state == IPA_USB_CONNECTED ||
+			state == IPA_USB_SUSPENDED ||
+			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_RELEASE_CHANNEL:
+		/* releasing the 1st channel already changed the state */
+		if (state == IPA_USB_STOPPED ||
+			(!is_dpl && state == IPA_USB_INITIALIZED))
+			is_legal = true;
+		break;
+	case IPA_USB_OP_DEINIT_TETH_PROT:
+		/*
+		 * For data tethering, deinit of an initialized protocol is
+		 * always allowed. E.g. rmnet is initialized and rndis is
+		 * connected; on cable disconnect, USB may deinit rmnet first
+		 * and only then disconnect rndis.
+		 */
+		if (!is_dpl || state == IPA_USB_INITIALIZED)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_SUSPEND:
+		if (state == IPA_USB_CONNECTED)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_SUSPEND_NO_RWAKEUP:
+		if (state == IPA_USB_CONNECTED)
+			is_legal = true;
+		break;
+	case IPA_USB_OP_RESUME:
+		if (state == IPA_USB_SUSPENDED ||
+			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
+			is_legal = true;
+		break;
+	default:
+		is_legal = false;
+		break;
+	}
+
+	if (!is_legal) {
+		IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n",
+			is_dpl ? "DPL" : "",
+			ipa3_usb_state_to_string(state),
+			ipa3_usb_op_to_string(op));
+	}
+
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	return is_legal;
+}
+
+static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype,
+	enum ipa_usb_notify_event event)
+{
+	int (*cb)(enum ipa_usb_notify_event, void *user_data);
+	void *user_data;
+	int res;
+
+	IPA_USB_DBG("Trying to notify USB with %s\n",
+		ipa3_usb_notify_event_to_string(event));
+
+	cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb;
+	user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data;
+
+	if (cb) {
+		res = cb(event, user_data);
+		IPA_USB_DBG("Notified USB with %s. is_dpl=%d result=%d\n",
+			ipa3_usb_notify_event_to_string(event),
+			IPA3_USB_IS_TTYPE_DPL(ttype), res);
+	}
+}
+
+/*
+ * This call-back is called from ECM or RNDIS drivers.
+ * Both drivers are data tethering drivers and not DPL
+ */
+static void ipa3_usb_device_ready_notify_cb(void)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH,
+		IPA_USB_DEVICE_READY);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
+{
+	bool rwakeup_pending;
+	unsigned long flags;
+	enum ipa3_usb_transport_type ttype =
+		IPA_USB_TRANSPORT_TETH;
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	rwakeup_pending =
+		ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending;
+	if (!rwakeup_pending) {
+		rwakeup_pending = true;
+		ipa3_usb_notify_do(ttype,
+			IPA_USB_REMOTE_WAKEUP);
+	}
+	ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending =
+		rwakeup_pending;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+}
+
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
+{
+	bool rwakeup_pending;
+	unsigned long flags;
+	enum ipa3_usb_transport_type ttype =
+		IPA_USB_TRANSPORT_DPL;
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	rwakeup_pending =
+		ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending;
+	if (!rwakeup_pending) {
+		rwakeup_pending = true;
+		ipa3_usb_notify_do(ttype,
+			IPA_USB_REMOTE_WAKEUP);
+	}
+	ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending =
+		rwakeup_pending;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+}
+
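+/*
+ * IPA PM callback: on IPA_PM_REQUEST_WAKEUP, queue the remote-wakeup
+ * work for the owning transport type if it is currently suspended.
+ */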
+static void ipa3_usb_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ipa3_usb_transport_type_ctx *ttype_ctx =
+		(struct ipa3_usb_transport_type_ctx *)p;
+	unsigned long flags;
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_USB_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG("state is %s\n",
+		ipa3_usb_state_to_string(ttype_ctx->state));
+	if (ttype_ctx->state == IPA_USB_SUSPENDED)
+		queue_work(ipa3_usb_ctx->wq,
+			ttype_ctx->pm_ctx.remote_wakeup_work);
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
+{
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+		return "rndis_ipa";
+	case IPA_USB_ECM:
+		return "ecm_ipa";
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		return "teth_bridge";
+	case IPA_USB_DIAG:
+		return "dpl";
+	default:
+		break;
+	}
+
+	return "unsupported";
+}
+
+static char *ipa3_usb_teth_bridge_prot_to_string(
+	enum ipa_usb_teth_prot teth_prot)
+{
+	switch (teth_prot) {
+	case IPA_USB_RMNET:
+		return "rmnet";
+	case IPA_USB_MBIM:
+		return "mbim";
+	default:
+		break;
+	}
+
+	return "unsupported";
+}
+
+static int ipa3_usb_init_teth_bridge(void)
+{
+	int result;
+
+	result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params);
+	if (result) {
+		IPA_USB_ERR("Failed to initialize teth_bridge\n");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_register_pm(enum ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_transport_type_ctx *ttype_ctx =
+		&ipa3_usb_ctx->ttype_ctx[ttype];
+	int result;
+
+	/* there is one PM resource for teth and one for DPL */
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype) && ipa3_usb_ctx->num_init_prot > 0)
+		return 0;
+
+	memset(&ttype_ctx->pm_ctx.reg_params, 0,
+		sizeof(ttype_ctx->pm_ctx.reg_params));
+	ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
+				"USB DPL" : "USB";
+	ttype_ctx->pm_ctx.reg_params.callback = ipa3_usb_pm_cb;
+	ttype_ctx->pm_ctx.reg_params.user_data = ttype_ctx;
+	ttype_ctx->pm_ctx.reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	result = ipa_pm_register(&ttype_ctx->pm_ctx.reg_params,
+		&ttype_ctx->pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("fail to register with PM %d\n", result);
+		goto fail_pm_reg;
+	}
+
+	result = ipa_pm_associate_ipa_cons_to_client(ttype_ctx->pm_ctx.hdl,
+		(ttype == IPA_USB_TRANSPORT_DPL) ?
+		IPA_CLIENT_USB_DPL_CONS : IPA_CLIENT_USB_CONS);
+	if (result) {
+		IPA_USB_ERR("fail to associate cons with PM %d\n", result);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ttype_ctx->pm_ctx.hdl);
+fail_pm_reg:
+	memset(&ttype_ctx->pm_ctx.reg_params, 0,
+		sizeof(ttype_ctx->pm_ctx.reg_params));
+	return result;
+}
+
+static int ipa3_usb_deregister_pm(enum ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_pm_context *pm_ctx =
+		&ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx;
+	int result;
+
+	result = ipa_pm_deregister(pm_ctx->hdl);
+	if (result)
+		return result;
+
+	memset(&pm_ctx->reg_params, 0, sizeof(pm_ctx->reg_params));
+	return 0;
+}
+
+static bool ipa3_usb_is_teth_switch_valid(enum ipa_usb_teth_prot new_teth)
+{
+	enum ipa_usb_teth_prot old_teth;
+	u32 ipa_r_rev;
+
+	IPA_USB_DBG("Start new_teth=%s\n",
+		ipa3_usb_teth_prot_to_string(new_teth));
+
+	if (IPA3_USB_IS_TTYPE_DPL(IPA3_USB_GET_TTYPE(new_teth)))
+		return true;
+
+	if (ipa_get_hw_type() != IPA_HW_v4_5)
+		return true;
+
+	ipa_r_rev = ipa3_get_r_rev_version();
+	IPA_USB_DBG("ipa_r_rev=%u\n", ipa_r_rev);
+
+	/* the switch restriction is relevant only for IPA4.5v1 */
+	if (ipa_r_rev != 10 && ipa_r_rev != 13)
+		return true;
+
+	if (ipa3_usb_ctx == NULL) {
+		IPA_USB_ERR("Invalid context");
+		return false;
+	}
+
+	if (new_teth < 0 || new_teth >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("Invalid new_teth %d\n", new_teth);
+		return false;
+	}
+
+	if (!ipa3_usb_ctx->prev_teth.valid) {
+		ipa3_usb_ctx->prev_teth.teth = new_teth;
+		ipa3_usb_ctx->prev_teth.valid = true;
+		return true;
+	}
+
+	old_teth = ipa3_usb_ctx->prev_teth.teth;
+	if (teth_type_switch_tbl_ipa45[old_teth][new_teth]) {
+		ipa3_usb_ctx->prev_teth.teth = new_teth;
+		return true;
+	}
+
+	IPA_USB_DBG("Invalid teth switch %s -> %s\n",
+		ipa3_usb_teth_prot_to_string(old_teth),
+		ipa3_usb_teth_prot_to_string(new_teth));
+	return false;
+}
+
+static int ipa_usb_set_lock_unlock(bool is_lock)
+{
+	IPA_USB_DBG("entry\n");
+	if (is_lock)
+		mutex_lock(&ipa3_usb_ctx->general_mutex);
+	else
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG("exit\n");
+
+	return 0;
+}
+
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+			   struct ipa_usb_teth_params *teth_params,
+			   int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+			   void *),
+			   void *user_data)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+	struct ipa3_usb_teth_prot_context *teth_prot_ptr;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE ||
+		((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) &&
+		teth_params == NULL) || ipa_usb_notify_cb == NULL ||
+		user_data == NULL) {
+		IPA_USB_ERR("bad parameters\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_INIT_TETH_PROT, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	/* Register with IPA PM */
+	teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot];
+	result = ipa3_usb_register_pm(ttype);
+	if (result) {
+		IPA_USB_ERR("Failed registering IPA PM\n");
+		goto bad_params;
+	}
+
+	if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) {
+		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb =
+			ipa_usb_notify_cb;
+	} else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb !=
+			ipa_usb_notify_cb) {
+			IPA_USB_ERR("Got different notify_cb\n");
+			result = -EINVAL;
+			goto bad_params;
+		}
+	} else {
+		IPA_USB_ERR("Already has dpl_notify_cb\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	/* Initialize tethering protocol */
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("%s already initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		if (teth_prot == IPA_USB_RNDIS) {
+			struct ipa_usb_init_params *rndis_ptr =
+				&teth_prot_ptr->teth_prot_params.rndis;
+
+			rndis_ptr->device_ready_notify =
+				ipa3_usb_device_ready_notify_cb;
+			memcpy(rndis_ptr->host_ethaddr,
+				teth_params->host_ethaddr,
+				sizeof(teth_params->host_ethaddr));
+			memcpy(rndis_ptr->device_ethaddr,
+				teth_params->device_ethaddr,
+				sizeof(teth_params->device_ethaddr));
+
+			result = rndis_ipa_init(rndis_ptr);
+			if (result) {
+				IPA_USB_ERR("Failed to initialize %s\n",
+					ipa3_usb_teth_prot_to_string(
+					teth_prot));
+				goto teth_prot_init_fail;
+			}
+		} else {
+			struct ecm_ipa_params *ecm_ptr =
+				&teth_prot_ptr->teth_prot_params.ecm;
+
+			ecm_ptr->device_ready_notify =
+				ipa3_usb_device_ready_notify_cb;
+			memcpy(ecm_ptr->host_ethaddr,
+				teth_params->host_ethaddr,
+				sizeof(teth_params->host_ethaddr));
+			memcpy(ecm_ptr->device_ethaddr,
+				teth_params->device_ethaddr,
+				sizeof(teth_params->device_ethaddr));
+
+			result = ecm_ipa_init(ecm_ptr);
+			if (result) {
+				IPA_USB_ERR("Failed to initialize %s\n",
+					ipa3_usb_teth_prot_to_string(
+					teth_prot));
+				goto teth_prot_init_fail;
+			}
+		}
+		teth_prot_ptr->state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		ipa3_usb_ctx->num_init_prot++;
+		IPA_USB_DBG("initialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("%s already initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		result = ipa3_usb_init_teth_bridge();
+		if (result)
+			goto teth_prot_init_fail;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		ipa3_usb_ctx->num_init_prot++;
+		IPA_USB_DBG("initialized %s %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		/*
+		 * Register for xdci lock/unlock callback with ipa core driver.
+		 * As per use case, only register for IPA_CONS end point now.
+		 * If needed we can include the same for IPA_PROD ep.
+		 * For IPA_USB_DIAG/DPL config there will not be any UL ep.
+		 */
+		ipa_register_client_callback(&ipa_usb_set_lock_unlock,
+			&ipa3_usb_get_teth_port_state, IPA_CLIENT_USB_PROD);
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("DPL already initialized\n");
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("initialized DPL\n");
+		break;
+	default:
+		IPA_USB_ERR("unexpected tethering protocol\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+		IPA_USB_ERR("failed to change state to initialized\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+teth_prot_init_fail:
+	if ((IPA3_USB_IS_TTYPE_DPL(ttype))
+		|| (ipa3_usb_ctx->num_init_prot == 0)) {
+		ipa3_usb_deregister_pm(ttype);
+	}
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_init_teth_prot);
+
+static void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	if (!notify)
+		return;
+	IPA_USB_ERR("Received event error %d, description: %d\n",
+		notify->evt_id, notify->err_desc);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	if (!notify)
+		return;
+	IPA_USB_ERR("Received channel error %d, description: %d\n",
+		notify->evt_id, notify->err_desc);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
+{
+	IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n",
+			params->gevntcount_low_addr);
+	IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n",
+			params->gevntcount_hi_addr);
+	IPA_USB_DBG_LOW("dir = %d\n", params->dir);
+	IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
+	IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n",
+		params->xfer_scratch.last_trb_addr_iova);
+	IPA_USB_DBG_LOW("const_buffer_size = %d\n",
+		params->xfer_scratch.const_buffer_size);
+	IPA_USB_DBG_LOW("depcmd_low_addr = %x\n",
+		params->xfer_scratch.depcmd_low_addr);
+	IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n",
+		params->xfer_scratch.depcmd_hi_addr);
+
+	if (params->client >= IPA_CLIENT_MAX  ||
+		params->teth_prot < 0 ||
+		params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE ||
+		params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B ||
+		params->xfer_scratch.const_buffer_size < 1 ||
+		params->xfer_scratch.const_buffer_size > 31) {
+		IPA_USB_ERR("Invalid params\n");
+		return false;
+	}
+	switch (params->teth_prot) {
+	case IPA_USB_DIAG:
+		if (!IPA_CLIENT_IS_CONS(params->client)) {
+			IPA_USB_ERR("DPL supports only DL channel\n");
+			return false;
+		}
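+		/* fall through */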
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			return false;
+		}
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_bridge_prot_to_string(
+				params->teth_prot));
+			return false;
+		}
+		break;
+	default:
+		IPA_USB_ERR("Unknown tethering protocol (%d)\n",
+			params->teth_prot);
+		return false;
+	}
+	return true;
+}
+
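+/*
+ * Map (map == true) or unmap the xDCI channel resources in the AP SMMU
+ * context: the shared USB register page (refcounted via
+ * smmu_reg_map.cnt), the transfer ring and the TRB data buffers.
+ */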
+static int ipa3_usb_smmu_map_xdci_channel(
+	struct ipa_usb_xdci_chan_params *params, bool map)
+{
+	int result;
+	u32 gevntcount_r = rounddown(params->gevntcount_low_addr, PAGE_SIZE);
+	u32 xfer_scratch_r =
+		rounddown(params->xfer_scratch.depcmd_low_addr, PAGE_SIZE);
+
+	if (gevntcount_r != xfer_scratch_r) {
+		IPA_USB_ERR("No support more than 1 page map for USB regs\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (map) {
+		if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) {
+			ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r;
+			result = ipa3_smmu_map_peer_reg(
+				ipa3_usb_ctx->smmu_reg_map.addr, true,
+				IPA_SMMU_CB_AP);
+			if (result) {
+				IPA_USB_ERR("failed to map USB regs %d\n",
+					result);
+				return result;
+			}
+		} else {
+			if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+				IPA_USB_ERR(
+					"mapping a different reg not supported\n");
+				return -EINVAL;
+			}
+		}
+		ipa3_usb_ctx->smmu_reg_map.cnt++;
+	} else {
+		if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+			IPA_USB_ERR(
+				"mapping a different reg not supported\n");
+			return -EINVAL;
+		}
+
+		if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) {
+			result = ipa3_smmu_map_peer_reg(
+				ipa3_usb_ctx->smmu_reg_map.addr, false,
+				IPA_SMMU_CB_AP);
+			if (result) {
+				IPA_USB_ERR("failed to unmap USB regs %d\n",
+					result);
+				return result;
+			}
+		}
+		ipa3_usb_ctx->smmu_reg_map.cnt--;
+	}
+
+	result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova,
+		params->xfer_ring_len, map, params->sgt_xfer_rings,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPA_USB_ERR("failed to map Xfer ring %d\n", result);
+		return result;
+	}
+
+	result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova,
+		params->data_buff_base_len, map, params->sgt_data_buff,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPA_USB_ERR("failed to map TRBs buff %d\n", result);
+		return result;
+	}
+
+	return 0;
+}
+
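+/*
+ * Allocate a GSI channel for one xDCI endpoint: validate the channel
+ * params, SMMU-map the transfer ring and data buffers, then build the
+ * GSI event ring and channel properties and request the channel.
+ */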
+static int ipa3_usb_request_xdci_channel(
+	struct ipa_usb_xdci_chan_params *params,
+	enum ipa_usb_direction dir,
+	struct ipa_req_chan_out_params *out_params)
+{
+	int result = -EFAULT;
+	struct ipa_request_gsi_channel_params chan_params;
+	enum ipa3_usb_transport_type ttype;
+	enum ipa_usb_teth_prot teth_prot;
+	struct ipa_usb_init_params *rndis_ptr;
+	struct ecm_ipa_params *ecm_ptr;
+	struct ipa_usb_xdci_chan_params *xdci_ch_params;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (params == NULL || out_params == NULL ||
+		!ipa3_usb_check_chan_params(params)) {
+		IPA_USB_ERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(params->teth_prot);
+	teth_prot = params->teth_prot;
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_REQUEST_CHANNEL, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		return -EPERM;
+	}
+
+	rndis_ptr =
+		&ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.rndis;
+	ecm_ptr =
+		&ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.ecm;
+
+	memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+	memcpy(&chan_params.ipa_ep_cfg, &params->ipa_ep_cfg,
+		sizeof(struct ipa_ep_cfg));
+	chan_params.client = params->client;
+	switch (params->teth_prot) {
+	case IPA_USB_RNDIS:
+		chan_params.priv = rndis_ptr->private;
+		if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+			chan_params.notify = rndis_ptr->ipa_tx_notify;
+		else
+			chan_params.notify = rndis_ptr->ipa_rx_notify;
+		chan_params.skip_ep_cfg = rndis_ptr->skip_ep_cfg;
+		break;
+	case IPA_USB_ECM:
+		chan_params.priv = ecm_ptr->private;
+		if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+			chan_params.notify = ecm_ptr->ecm_ipa_tx_dp_notify;
+		else
+			chan_params.notify = ecm_ptr->ecm_ipa_rx_dp_notify;
+		chan_params.skip_ep_cfg = ecm_ptr->skip_ep_cfg;
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		chan_params.priv =
+			ipa3_usb_ctx->teth_bridge_params.private_data;
+		chan_params.notify =
+			ipa3_usb_ctx->teth_bridge_params.usb_notify_cb;
+		chan_params.skip_ep_cfg =
+			ipa3_usb_ctx->teth_bridge_params.skip_ep_cfg;
+		break;
+	case IPA_USB_DIAG:
+		chan_params.priv = NULL;
+		chan_params.notify = NULL;
+		chan_params.skip_ep_cfg = true;
+		break;
+	default:
+		break;
+	}
+
+	result = ipa3_usb_smmu_map_xdci_channel(params, true);
+	if (result) {
+		IPA_USB_ERR("failed to smmu map %d\n", result);
+		return result;
+	}
+
+	/* store channel params for SMMU unmap */
+	if (dir == IPA_USB_DIR_UL)
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+	else
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
+
+	*xdci_ch_params = *params;
+	result = ipa_smmu_store_sgt(
+		&xdci_ch_params->sgt_xfer_rings,
+		params->sgt_xfer_rings);
+	if (result) {
+		/* undo the SMMU mapping done above */
+		ipa3_usb_smmu_map_xdci_channel(params, false);
+		return result;
+	}
+
+	result = ipa_smmu_store_sgt(
+		&xdci_ch_params->sgt_data_buff,
+		params->sgt_data_buff);
+	if (result) {
+		ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+		ipa3_usb_smmu_map_xdci_channel(params, false);
+		return result;
+	}
+	chan_params.keep_ipa_awake = params->keep_ipa_awake;
+	chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV;
+	chan_params.evt_ring_params.intr = GSI_INTR_IRQ;
+	chan_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	chan_params.evt_ring_params.ring_len = params->xfer_ring_len -
+		chan_params.evt_ring_params.re_size;
+	chan_params.evt_ring_params.ring_base_addr =
+		params->xfer_ring_base_addr_iova;
+	chan_params.evt_ring_params.ring_base_vaddr = NULL;
+	chan_params.evt_ring_params.int_modt = 0;
+	chan_params.evt_ring_params.int_modc = 0;
+	chan_params.evt_ring_params.intvec = 0;
+	chan_params.evt_ring_params.msi_addr = 0;
+	chan_params.evt_ring_params.rp_update_addr = 0;
+	chan_params.evt_ring_params.exclusive = true;
+	chan_params.evt_ring_params.err_cb = ipa3_usb_gsi_evt_err_cb;
+	chan_params.evt_ring_params.user_data = NULL;
+	chan_params.evt_scratch.xdci.gevntcount_low_addr =
+		params->gevntcount_low_addr;
+	chan_params.evt_scratch.xdci.gevntcount_hi_addr =
+		params->gevntcount_hi_addr;
+	chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI;
+	chan_params.chan_params.dir = params->dir;
+	/* chan_id is set in ipa3_request_gsi_channel() */
+	chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+	chan_params.chan_params.ring_len = params->xfer_ring_len;
+	chan_params.chan_params.ring_base_addr =
+		params->xfer_ring_base_addr_iova;
+	chan_params.chan_params.ring_base_vaddr = NULL;
+	if (ipa_get_hw_type() >= IPA_HW_v4_0)
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	chan_params.chan_params.db_in_bytes = 1;
+	chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+		chan_params.chan_params.low_weight =
+			IPA_USB_DL_CHAN_LOW_WEIGHT;
+	else
+		chan_params.chan_params.low_weight =
+			IPA_USB_UL_CHAN_LOW_WEIGHT;
+	chan_params.chan_params.xfer_cb = NULL;
+	chan_params.chan_params.err_cb = ipa3_usb_gsi_chan_err_cb;
+	chan_params.chan_params.chan_user_data = NULL;
+	chan_params.chan_scratch.xdci.last_trb_addr =
+		params->xfer_scratch.last_trb_addr_iova;
+	/* xferrscidx will be updated later */
+	chan_params.chan_scratch.xdci.xferrscidx = 0;
+	chan_params.chan_scratch.xdci.const_buffer_size =
+		params->xfer_scratch.const_buffer_size;
+	chan_params.chan_scratch.xdci.depcmd_low_addr =
+		params->xfer_scratch.depcmd_low_addr;
+	chan_params.chan_scratch.xdci.depcmd_hi_addr =
+		params->xfer_scratch.depcmd_hi_addr;
+
+	/*
+	 * Update scratch for MCS smart prefetch:
+	 * starting with IPA4.5, smart prefetch is implemented in H/W.
+	 * On IPA 4.0/4.1/4.2 MCS smart prefetch is not used, so keep
+	 * the fields zero.
+	 */
+	if (ipa_get_hw_type() < IPA_HW_v4_0) {
+		chan_params.chan_scratch.xdci.outstanding_threshold =
+		((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) *
+		chan_params.chan_params.re_size;
+	}
+	/* max_outstanding_tre is set in ipa3_request_gsi_channel() */
+
+	result = ipa3_request_gsi_channel(&chan_params, out_params);
+	if (result) {
+		IPA_USB_ERR("failed to allocate GSI channel\n");
+		ipa3_usb_smmu_map_xdci_channel(params, false);
+		return result;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+}
+
+static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
+	enum ipa_usb_direction dir,
+	enum ipa3_usb_transport_type ttype)
+{
+	int result = 0;
+	struct ipa_usb_xdci_chan_params *xdci_ch_params;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (ttype < 0 || ttype >= IPA_USB_TRANSPORT_MAX) {
+		IPA_USB_ERR("bad parameter\n");
+		return -EINVAL;
+	}
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_RELEASE_CHANNEL, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		return -EPERM;
+	}
+
+	/* Release channel */
+	result = ipa3_release_gsi_channel(clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to deallocate channel\n");
+		return result;
+	}
+
+	if (dir == IPA_USB_DIR_UL)
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+	else
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
+
+	result = ipa3_usb_smmu_map_xdci_channel(xdci_ch_params, false);
+
+	if (xdci_ch_params->sgt_xfer_rings != NULL)
+		ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+	if (xdci_ch_params->sgt_data_buff != NULL)
+		ipa_smmu_free_sgt(&xdci_ch_params->sgt_data_buff);
+
+	/* Change ipa_usb state to INITIALIZED */
+	if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+		IPA_USB_ERR("failed to change state to initialized\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+}
+
+static bool ipa3_usb_check_connect_params(
+	struct ipa_usb_xdci_connect_params_internal *params)
+{
+	IPA_USB_DBG_LOW("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx);
+	IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx);
+	IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n",
+		params->max_supported_bandwidth_mbps);
+
+	if (params->max_pkt_size < IPA_USB_FULL_SPEED_64B  ||
+		params->max_pkt_size > IPA_USB_SUPER_SPEED_1024B  ||
+		params->ipa_to_usb_xferrscidx > 127 ||
+		(params->teth_prot != IPA_USB_DIAG &&
+		(params->usb_to_ipa_xferrscidx > 127)) ||
+		params->teth_prot < 0 ||
+		params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("Invalid params\n");
+		return false;
+	}
+
+	if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+		IPA_USB_TETH_PROT_INVALID) {
+		IPA_USB_ERR("%s is not initialized\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot));
+		return false;
+	}
+
+	return true;
+}
+
+static int ipa3_usb_connect_teth_bridge(
+	struct teth_bridge_connect_params *params)
+{
+	int result;
+
+	result = teth_bridge_connect(params);
+	if (result) {
+		IPA_USB_ERR("failed to connect teth_bridge (%s)\n",
+			params->tethering_mode == TETH_TETHERING_MODE_RMNET ?
+			"rmnet" : "mbim");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	int result;
+	struct teth_bridge_connect_params teth_bridge_params;
+	struct ipa3_usb_teth_prot_conn_params *teth_conn_params;
+	enum ipa3_usb_transport_type ttype;
+	struct ipa3_usb_teth_prot_context *teth_prot_ptr =
+		&ipa3_usb_ctx->teth_prot_ctx[teth_prot];
+
+	IPA_USB_DBG("connecting protocol = %s\n",
+		ipa3_usb_teth_prot_to_string(teth_prot));
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	teth_conn_params = &(ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params);
+
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+		if (teth_prot_ptr->state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			teth_prot_ptr->user_data;
+		result = rndis_ipa_pipe_connect_notify(
+			teth_conn_params->usb_to_ipa_clnt_hdl,
+			teth_conn_params->ipa_to_usb_clnt_hdl,
+			teth_conn_params->params.max_xfer_size_bytes_to_dev,
+			teth_conn_params->params.max_packet_number_to_dev,
+			teth_conn_params->params.max_xfer_size_bytes_to_host,
+			teth_prot_ptr->teth_prot_params.rndis.private);
+		if (result) {
+			IPA_USB_ERR("failed to connect %s\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		teth_prot_ptr->state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		IPA_USB_DBG("%s is connected\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_ECM:
+		if (teth_prot_ptr->state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			teth_prot_ptr->user_data;
+		result = ecm_ipa_connect(teth_conn_params->usb_to_ipa_clnt_hdl,
+			teth_conn_params->ipa_to_usb_clnt_hdl,
+			teth_prot_ptr->teth_prot_params.ecm.private);
+		if (result) {
+			IPA_USB_ERR("failed to connect %s\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		teth_prot_ptr->state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		IPA_USB_DBG("%s is connected\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (teth_prot_ptr->state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+		result = ipa3_usb_init_teth_bridge();
+		if (result)
+			return result;
+
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			teth_prot_ptr->user_data;
+		teth_bridge_params.ipa_usb_pipe_hdl =
+			teth_conn_params->ipa_to_usb_clnt_hdl;
+		teth_bridge_params.usb_ipa_pipe_hdl =
+			teth_conn_params->usb_to_ipa_clnt_hdl;
+		teth_bridge_params.tethering_mode =
+			(teth_prot == IPA_USB_RMNET) ?
+			(TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM);
+		teth_bridge_params.client_type = IPA_CLIENT_USB_PROD;
+		result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
+		if (result) {
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+		IPA_USB_DBG("%s (%s) is connected\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data;
+		ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+		IPA_USB_DBG("%s is connected\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	default:
+		IPA_USB_ERR("Invalid tethering protocol\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_disconnect_teth_bridge(void)
+{
+	int result;
+
+	result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD);
+	if (result) {
+		IPA_USB_ERR("failed to disconnect teth_bridge\n");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	enum ipa3_usb_transport_type ttype;
+	struct ipa3_usb_teth_prot_context *teth_prot_ptr =
+		&ipa3_usb_ctx->teth_prot_ctx[teth_prot];
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is not connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		if (teth_prot == IPA_USB_RNDIS) {
+			result = rndis_ipa_pipe_disconnect_notify(
+				teth_prot_ptr->teth_prot_params.rndis.private);
+		} else {
+			result = ecm_ipa_disconnect(
+				teth_prot_ptr->teth_prot_params.ecm.private);
+		}
+		if (result) {
+			IPA_USB_ERR("failed to disconnect %s\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s (%s) is not connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot),
+				ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		result = ipa3_usb_disconnect_teth_bridge();
+		if (result)
+			break;
+
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s (%s)\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (teth_prot_ptr->state != IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is not connected\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	default:
+		break;
+	}
+
+	ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+	return result;
+}
+
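+/*
+ * Connect sequence: set the xDCI EE scratch (max packet size), set PM
+ * throughput (except for DPL) and activate PM, start the UL and DL/DPL
+ * channels, enable the MHIP pipes when MHIP offload is enabled, and
+ * finally connect the tethering protocol and move to CONNECTED.
+ */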
+static int ipa3_usb_xdci_connect_internal(
+	struct ipa_usb_xdci_connect_params_internal *params)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+	struct ipa3_usb_teth_prot_conn_params *teth_prot_ptr;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (params == NULL || !ipa3_usb_check_connect_params(params)) {
+		IPA_USB_ERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL :
+		IPA_USB_TRANSPORT_TETH;
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_CONNECT, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		return -EPERM;
+	}
+
+	teth_prot_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params;
+	teth_prot_ptr->ipa_to_usb_clnt_hdl = params->ipa_to_usb_clnt_hdl;
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+		teth_prot_ptr->usb_to_ipa_clnt_hdl =
+		params->usb_to_ipa_clnt_hdl;
+	teth_prot_ptr->params = params->teth_prot_params;
+
+	/* Set EE xDCI specific scratch */
+	result = ipa3_set_usb_max_packet_size(params->max_pkt_size);
+	if (result) {
+		IPA_USB_ERR("failed setting xDCI EE scratch field\n");
+		return result;
+	}
+	/* perf profile is not set on the USB DPL pipe */
+	if (ttype != IPA_USB_TRANSPORT_DPL) {
+		result = ipa_pm_set_throughput(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
+			params->max_supported_bandwidth_mbps);
+		if (result) {
+			IPA_USB_ERR("failed to set pm throughput\n");
+			return result;
+		}
+	}
+
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("failed to activate pm\n");
+		return result;
+	}
+
+	if (params->teth_prot != IPA_USB_DIAG) {
+		/* Start UL channel */
+		result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl,
+			params->usb_to_ipa_xferrscidx,
+			params->usb_to_ipa_xferrscidx_valid);
+		if (result) {
+			IPA_USB_ERR("failed to connect UL channel\n");
+			goto connect_ul_fail;
+		}
+	}
+
+	/* Start DL/DPL channel */
+	result = ipa3_xdci_start(params->ipa_to_usb_clnt_hdl,
+		params->ipa_to_usb_xferrscidx,
+		params->ipa_to_usb_xferrscidx_valid);
+	if (result) {
+		IPA_USB_ERR("failed to connect DL/DPL channel\n");
+		goto connect_dl_fail;
+	}
+
+	/* MHIP pipe enablement */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to enable MHIP channel\n");
+			goto connect_teth_prot_fail;
+		}
+	}
+
+	/* Connect tethering protocol */
+	result = ipa3_usb_connect_teth_prot(params->teth_prot);
+	if (result) {
+		IPA_USB_ERR("failed to connect teth protocol\n");
+		goto connect_mhip_prot_fail;
+	}
+
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+		IPA_USB_ERR(
+			"failed to change state to connected\n");
+		goto state_change_connected_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+
+state_change_connected_fail:
+	ipa3_usb_disconnect_teth_prot(params->teth_prot);
+connect_mhip_prot_fail:
+	if (ipa3_is_mhip_offload_enabled())
+		ipa_mpm_mhip_xdci_pipe_disable(params->teth_prot);
+connect_teth_prot_fail:
+	ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1);
+	ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl);
+	ipa3_reset_gsi_event_ring(params->ipa_to_usb_clnt_hdl);
+connect_dl_fail:
+	if (params->teth_prot != IPA_USB_DIAG) {
+		ipa3_xdci_disconnect(params->usb_to_ipa_clnt_hdl, false, -1);
+		ipa3_reset_gsi_channel(params->usb_to_ipa_clnt_hdl);
+		ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
+	}
+connect_ul_fail:
+	ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	return result;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static char dbg_buff[IPA_USB_MAX_MSG_LEN];
+
+static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
+{
+	int res;
+	int i;
+	unsigned long flags;
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (ipa3_usb_ctx == NULL) {
+		IPA_USB_ERR("IPA USB was not inited yet\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+
+	if (!status) {
+		IPA_USB_ERR("Invalid input\n");
+		res = -EINVAL;
+		goto bail;
+	}
+
+	memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	status->teth_state = ipa3_usb_state_to_string(
+		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
+	status->dpl_state = ipa3_usb_state_to_string(
+		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+		if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM))
+				status->inited_prots[status->num_init_prot++] =
+					ipa3_usb_teth_bridge_prot_to_string(i);
+			else
+				status->inited_prots[status->num_init_prot++] =
+					ipa3_usb_teth_prot_to_string(i);
+		} else if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			switch (i) {
+			case IPA_USB_RMNET:
+			case IPA_USB_MBIM:
+				status->teth_connected_prot =
+					ipa3_usb_teth_bridge_prot_to_string(i);
+				break;
+			case IPA_USB_DIAG:
+				status->dpl_connected_prot =
+					ipa3_usb_teth_prot_to_string(i);
+				break;
+			default:
+				status->teth_connected_prot =
+					ipa3_usb_teth_prot_to_string(i);
+			}
+		}
+	}
+
+	res = 0;
+	IPA_USB_DBG_LOW("exit\n");
+bail:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return res;
+}
+
+static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa3_usb_status_dbg_info status;
+	int result;
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	result = ipa3_usb_get_status_dbg_info(&status);
+	if (result) {
+		nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+				"Fail to read IPA USB status\n");
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+			"Tethering Data State: %s\n"
+			"DPL State: %s\n"
+			"Protocols in Initialized State: ",
+			status.teth_state,
+			status.dpl_state);
+		cnt += nbytes;
+
+		for (i = 0; i < status.num_init_prot; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_USB_MAX_MSG_LEN - cnt,
+					"%s ", status.inited_prots[i]);
+			cnt += nbytes;
+		}
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				status.num_init_prot ? "\n" : "None\n");
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				"Protocols in Connected State: ");
+		cnt += nbytes;
+		if (status.teth_connected_prot) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_USB_MAX_MSG_LEN - cnt,
+				"%s ", status.teth_connected_prot);
+			cnt += nbytes;
+		}
+		if (status.dpl_connected_prot) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_USB_MAX_MSG_LEN - cnt,
+				"%s ", status.dpl_connected_prot);
+			cnt += nbytes;
+		}
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				(status.teth_connected_prot ||
+				status.dpl_connected_prot) ? "\n" : "None\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static const struct file_operations ipa3_ipa_usb_ops = {
+	.read = ipa3_read_usb_state_info,
+};
+
+static void ipa_usb_debugfs_init(void)
+{
+	const mode_t read_only_mode = 0444;
+
+	ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", NULL);
+	if (IS_ERR(ipa3_usb_ctx->dent)) {
+		pr_err("fail to create ipa_usb debugfs dir\n");
+		return;
+	}
+
+	ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info",
+			read_only_mode, ipa3_usb_ctx->dent, NULL,
+			&ipa3_ipa_usb_ops);
+	if (!ipa3_usb_ctx->dfile_state_info ||
+		IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
+		pr_err("failed to create file for state_info\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(ipa3_usb_ctx->dent);
+	ipa3_usb_ctx->dent = NULL;
+}
+
+static void ipa_usb_debugfs_remove(void)
+{
+	if (IS_ERR(ipa3_usb_ctx->dent)) {
+		IPA_USB_ERR("ipa_usb debugfs folder was not created\n");
+		return;
+	}
+
+	debugfs_remove_recursive(ipa3_usb_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipa_usb_debugfs_init(void){}
+static void ipa_usb_debugfs_remove(void){}
+#endif /* CONFIG_DEBUG_FS */
+
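+/**
+ * ipa_usb_xdci_connect() - allocate the UL and DL/DPL xDCI channels and
+ * connect the tethering protocol
+ * @ul_chan_params:	UL channel parameters (may be NULL for DPL)
+ * @dl_chan_params:	DL/DPL channel parameters
+ * @ul_out_params:	[out] UL channel allocation info (may be NULL for DPL)
+ * @dl_out_params:	[out] DL/DPL channel allocation info
+ * @connect_params:	connect parameters (tethering protocol etc.)
+ *
+ * Returns:	0 on success, negative on failure
+ */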
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+			 struct ipa_usb_xdci_chan_params *dl_chan_params,
+			 struct ipa_req_chan_out_params *ul_out_params,
+			 struct ipa_req_chan_out_params *dl_out_params,
+			 struct ipa_usb_xdci_connect_params *connect_params)
+{
+	int result = -EFAULT;
+	struct ipa_usb_xdci_connect_params_internal conn_params;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (connect_params == NULL || dl_chan_params == NULL ||
+		dl_out_params == NULL ||
+		(connect_params->teth_prot != IPA_USB_DIAG &&
+		(ul_chan_params == NULL || ul_out_params == NULL))) {
+		IPA_USB_ERR("bad parameters\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (!ipa3_usb_is_teth_switch_valid(connect_params->teth_prot)) {
+		IPA_USB_ERR("Invalid teth type switch\n");
+		goto bad_params;
+	}
+
+	if (connect_params->teth_prot != IPA_USB_DIAG) {
+		result = ipa3_usb_request_xdci_channel(ul_chan_params,
+			IPA_USB_DIR_UL, ul_out_params);
+		if (result) {
+			IPA_USB_ERR("failed to allocate UL channel\n");
+			goto bad_params;
+		}
+	}
+
+	result = ipa3_usb_request_xdci_channel(dl_chan_params, IPA_USB_DIR_DL,
+		dl_out_params);
+	if (result) {
+		IPA_USB_ERR("failed to allocate DL/DPL channel\n");
+		goto alloc_dl_chan_fail;
+	}
+
+	memset(&conn_params, 0,
+		sizeof(struct ipa_usb_xdci_connect_params_internal));
+	conn_params.max_pkt_size = connect_params->max_pkt_size;
+	conn_params.ipa_to_usb_clnt_hdl = dl_out_params->clnt_hdl;
+	conn_params.ipa_to_usb_xferrscidx =
+		connect_params->ipa_to_usb_xferrscidx;
+	conn_params.ipa_to_usb_xferrscidx_valid =
+		connect_params->ipa_to_usb_xferrscidx_valid;
+	if (connect_params->teth_prot != IPA_USB_DIAG) {
+		conn_params.usb_to_ipa_clnt_hdl = ul_out_params->clnt_hdl;
+		conn_params.usb_to_ipa_xferrscidx =
+			connect_params->usb_to_ipa_xferrscidx;
+		conn_params.usb_to_ipa_xferrscidx_valid =
+			connect_params->usb_to_ipa_xferrscidx_valid;
+	}
+	conn_params.teth_prot = connect_params->teth_prot;
+	conn_params.teth_prot_params = connect_params->teth_prot_params;
+	conn_params.max_supported_bandwidth_mbps =
+		connect_params->max_supported_bandwidth_mbps;
+	result = ipa3_usb_xdci_connect_internal(&conn_params);
+	if (result) {
+		IPA_USB_ERR("failed to connect\n");
+		goto connect_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+connect_fail:
+	ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl, IPA_USB_DIR_DL,
+		IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
+alloc_dl_chan_fail:
+	if (connect_params->teth_prot != IPA_USB_DIAG)
+		ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
+			IPA_USB_DIR_UL,
+			IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_connect);
+
+static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameter\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+		IPA_USB_TETH_PROT_CONNECTED) {
+		IPA_USB_ERR("%s is not connected\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Assumes lock already acquired */
+static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+				enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	enum ipa3_usb_transport_type ttype;
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	/* Reset DL channel */
+	result = ipa3_reset_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to reset DL channel\n");
+		return result;
+	}
+
+	/* Reset DL event ring */
+	result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to reset DL event ring\n");
+		return result;
+	}
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		ipa3_xdci_ep_delay_rm(ul_clnt_hdl); /* Remove ep_delay if set */
+		/* Reset UL channel */
+		result = ipa3_reset_gsi_channel(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to reset UL channel\n");
+			return result;
+		}
+
+		/* Reset UL event ring */
+		result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to reset UL event ring\n");
+			return result;
+		}
+	}
+
+	/* Change state to STOPPED */
+	if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
+		IPA_USB_ERR("failed to change state to stopped\n");
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		result = ipa3_usb_release_xdci_channel(ul_clnt_hdl,
+			IPA_USB_DIR_UL, ttype);
+		if (result) {
+			IPA_USB_ERR("failed to release UL channel\n");
+			return result;
+		}
+	}
+
+	result = ipa3_usb_release_xdci_channel(dl_clnt_hdl,
+		IPA_USB_DIR_DL, ttype);
+	if (result) {
+		IPA_USB_ERR("failed to release DL channel\n");
+		return result;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+
+	return 0;
+}
+
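+/**
+ * ipa_usb_xdci_disconnect() - stop and release the xDCI channels and
+ * disconnect the tethering protocol
+ * @ul_clnt_hdl:	UL channel client handle (unused for DPL)
+ * @dl_clnt_hdl:	DL/DPL channel client handle
+ * @teth_prot:	tethering protocol
+ *
+ * Returns:	0 on success, negative on failure
+ */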
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			    enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	struct ipa_ep_cfg_holb holb_cfg;
+	unsigned long flags;
+	enum ipa3_usb_state orig_state;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_DISCONNECT, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	if (ipa3_usb_ctx->ttype_ctx[ttype].state ==
+		IPA_USB_SUSPENDED_NO_RWAKEUP) {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
+			teth_prot);
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+		return result;
+	}
+
+	if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		/* Stop DL/DPL channel */
+		result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect DL/DPL channel\n");
+			goto bad_params;
+		}
+	} else {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		ipa_cfg_ep_holb(dl_clnt_hdl, &holb_cfg);
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		if (orig_state != IPA_USB_SUSPENDED) {
+			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+				flags);
+			/* Stop UL channel */
+			result = ipa3_xdci_disconnect(ul_clnt_hdl,
+				true,
+				ipa3_usb_ctx->qmi_req_id);
+			if (result) {
+				IPA_USB_ERR("failed disconnect UL channel\n");
+				goto bad_params;
+			}
+			ipa3_usb_ctx->qmi_req_id++;
+		} else
+			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+				flags);
+	} else
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	/* Stop UL/DL MHIP channels */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP pipe\n");
+			goto bad_params;
+		}
+	}
+
+	result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
+			teth_prot);
+	if (result)
+		goto bad_params;
+
+	/* Disconnect tethering protocol */
+	result = ipa3_usb_disconnect_teth_prot(teth_prot);
+	if (result)
+		goto bad_params;
+
+	if (orig_state != IPA_USB_SUSPENDED) {
+		result = ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+		if (result) {
+			IPA_USB_ERR("failed to deactivate PM\n");
+			goto bad_params;
+		}
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
+
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+	struct ipa3_usb_teth_prot_context *teth_prot_ptr;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_DEINIT_TETH_PROT, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	/* Clean-up tethering protocol */
+	teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot];
+
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (teth_prot_ptr->state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+		if (teth_prot == IPA_USB_RNDIS)
+			rndis_ipa_cleanup(
+				teth_prot_ptr->teth_prot_params.rndis.private);
+		else
+			ecm_ipa_cleanup(
+				teth_prot_ptr->teth_prot_params.ecm.private);
+		teth_prot_ptr->user_data = NULL;
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID;
+		ipa3_usb_ctx->num_init_prot--;
+		IPA_USB_DBG("deinitialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (teth_prot_ptr->state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s (%s) is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot),
+				ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+
+		teth_prot_ptr->user_data = NULL;
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID;
+		ipa3_usb_ctx->num_init_prot--;
+		IPA_USB_DBG("deinitialized %s (%s)\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (teth_prot_ptr->state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+		teth_prot_ptr->user_data = NULL;
+		teth_prot_ptr->state = IPA_USB_TETH_PROT_INVALID;
+		IPA_USB_DBG("deinitialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	default:
+		IPA_USB_ERR("unexpected tethering protocol\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
+		(ipa3_usb_ctx->num_init_prot == 0)) {
+		if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
+			IPA_USB_ERR(
+				"failed to change state to invalid\n");
+		ipa3_usb_deregister_pm(ttype);
+		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
+
+/* Assumes lock already acquired */
+static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	enum ipa3_usb_transport_type ttype;
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND_NO_RWAKEUP, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto fail_exit;
+	}
+
+	IPA_USB_DBG("Start suspend with no remote wakeup sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel":"Data Tethering channels");
+
+	if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+		result = -EINVAL;
+		goto fail_exit;
+	}
+
+	/* Stop DL/DPL channel */
+	result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+	if (result) {
+		IPA_USB_ERR("failed to disconnect DL/DPL channel\n");
+		goto fail_exit;
+	}
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		/* Stop UL channel */
+		result = ipa3_xdci_disconnect(ul_clnt_hdl, true,
+			ipa3_usb_ctx->qmi_req_id);
+		if (result) {
+			IPA_USB_ERR("failed disconnect UL channel\n");
+			goto start_dl;
+		}
+		ipa3_usb_ctx->qmi_req_id++;
+	}
+
+	/* Stop MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP pipe\n");
+			goto start_ul;
+		}
+	}
+
+	/* Disconnect tethering protocol */
+	result = ipa3_usb_disconnect_teth_prot(teth_prot);
+	if (result)
+		goto enable_mhip;
+
+	result = ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("failed to deactivate PM\n");
+		goto connect_teth;
+	}
+
+	/* Change ipa_usb state to SUSPENDED_NO_RWAKEUP */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED_NO_RWAKEUP, false, ttype))
+		IPA_USB_ERR("failed to change state to suspend no rwakeup\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+
+connect_teth:
+	(void)ipa3_usb_connect_teth_prot(teth_prot);
+enable_mhip:
+	if (ipa3_is_mhip_offload_enabled())
+		(void)ipa_mpm_mhip_xdci_pipe_enable(teth_prot);
+start_ul:
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype))
+		(void)ipa3_xdci_connect(ul_clnt_hdl);
+start_dl:
+	(void)ipa3_xdci_connect(dl_clnt_hdl);
+fail_exit:
+	return result;
+}
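+
+/*
+ * The error labels above unwind in reverse order (connect_teth ->
+ * enable_mhip -> start_ul -> start_dl), so a failure at a later step
+ * restarts exactly the pieces that were already stopped; the (void)
+ * casts mark recovery calls whose own failures are deliberately
+ * ignored.
+ */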
+
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup)
+{
+	int result = 0;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (!with_remote_wakeup) {
+		result = ipa3_usb_suspend_no_remote_wakeup(ul_clnt_hdl,
+			dl_clnt_hdl, teth_prot);
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+		return result;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_SUSPEND, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	IPA_USB_DBG("Start suspend sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel":"Data Tethering channels");
+
+	/* Change state to SUSPEND_REQUESTED */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) {
+		IPA_USB_ERR(
+			"fail changing state to suspend_req\n");
+		result = -EFAULT;
+		goto bad_params;
+	}
+
+	/* Stop UL channel & suspend DL/DPL EP */
+	result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
+		true,
+		ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype));
+	if (result) {
+		IPA_USB_ERR("failed to suspend\n");
+		goto suspend_fail;
+	}
+	ipa3_usb_ctx->qmi_req_id++;
+
+	result = ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("failed to deactivate PM IPA client\n");
+		goto pm_deactivate_fail;
+	}
+
+	/* Change state to SUSPENDED */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
+		IPA_USB_ERR("failed to change state to suspended\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+pm_deactivate_fail:
+	ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
+		IPA3_USB_IS_TTYPE_DPL(ttype));
+suspend_fail:
+	/* Change state back to CONNECTED */
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype))
+		IPA_USB_ERR("failed to change state back to connected\n");
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_suspend);
+
+/* Assumes lock already acquired */
+static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	IPA_USB_DBG("Start resume with no remote wakeup sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel":"Data Tethering channels");
+
+	/* Activate PM */
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result)
+		goto fail_exit;
+
+	/* Connect tethering protocol */
+	result = ipa3_usb_connect_teth_prot(teth_prot);
+	if (result) {
+		IPA_USB_ERR("failed to connect teth protocol\n");
+		goto release_prod;
+	}
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		/* Start UL channel */
+		result = ipa3_xdci_connect(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to start UL channel\n");
+			goto disconn_teth;
+		}
+	}
+
+	/* Start DL/DPL channel */
+	result = ipa3_xdci_connect(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to start DL/DPL channel\n");
+		goto stop_ul;
+	}
+
+	/* Start MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_enable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to enable MHIP pipe\n");
+			goto stop_dl;
+		}
+	}
+	/* Change state to CONNECTED */
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+		IPA_USB_ERR("failed to change state to connected\n");
+		result = -EFAULT;
+		goto stop_mhip;
+	}
+
+	return 0;
+stop_mhip:
+	if (ipa3_is_mhip_offload_enabled())
+		(void)ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+stop_dl:
+	(void)ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+stop_ul:
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		(void)ipa3_xdci_disconnect(ul_clnt_hdl, true,
+			ipa3_usb_ctx->qmi_req_id);
+		ipa3_usb_ctx->qmi_req_id++;
+	}
+disconn_teth:
+	(void)ipa3_usb_disconnect_teth_prot(teth_prot);
+release_prod:
+	(void)ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+fail_exit:
+	return result;
+}
+
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_state prev_state;
+	unsigned long flags;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_OP_RESUME, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) {
+		result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl,
+			dl_clnt_hdl, teth_prot);
+		goto bad_params;
+	}
+
+	IPA_USB_DBG("Start resume sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel" : "Data Tethering channels");
+
+	/* Change state to RESUME_IN_PROGRESS */
+	if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) {
+		IPA_USB_ERR("failed to change state to resume_in_progress\n");
+		result = -EFAULT;
+		goto bad_params;
+	}
+
+	/* Activate PM */
+	result = ipa_pm_activate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	if (result)
+		goto activate_pm_fail;
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		/* Start UL channel */
+		result = ipa3_start_gsi_channel(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to start UL channel\n");
+			goto start_ul_fail;
+		}
+	}
+
+	/* Start DL/DPL channel */
+	result = ipa3_start_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to start DL/DPL channel\n");
+		goto start_dl_fail;
+	}
+
+	/* Change state to CONNECTED */
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+		IPA_USB_ERR("failed to change state to connected\n");
+		result = -EFAULT;
+		goto state_change_connected_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+state_change_connected_fail:
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result)
+		IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+			result);
+start_dl_fail:
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		result = ipa3_stop_gsi_channel(ul_clnt_hdl);
+		if (result)
+			IPA_USB_ERR("Error stopping UL channel: %d\n", result);
+	}
+start_ul_fail:
+	ipa_pm_deactivate_sync(
+		ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+activate_pm_fail:
+	/* Change state back to prev_state */
+	if (!ipa3_usb_set_state(prev_state, true, ttype))
+		IPA_USB_ERR("failed to change state back to %s\n",
+			ipa3_usb_state_to_string(prev_state));
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_resume);
+
+static int __init ipa3_usb_init(void)
+{
+	int i;
+	unsigned long flags;
+	int res;
+	struct ipa3_usb_pm_context *pm_ctx;
+
+	pr_info("ipa_usb driver init\n");
+	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
+	if (ipa3_usb_ctx == NULL) {
+		pr_err(":ipa_usb init failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+		ipa3_usb_ctx->teth_prot_ctx[i].state =
+			IPA_USB_TETH_PROT_INVALID;
+	ipa3_usb_ctx->num_init_prot = 0;
+	init_completion(&ipa3_usb_ctx->dev_ready_comp);
+	ipa3_usb_ctx->qmi_req_id = 0;
+	spin_lock_init(&ipa3_usb_ctx->state_lock);
+	ipa3_usb_ctx->dl_data_pending = false;
+	mutex_init(&ipa3_usb_ctx->general_mutex);
+
+	/* init PM related members */
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_notify_remote_wakeup_work;
+	pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
+	pm_ctx->hdl = ~0;
+	pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work;
+
+	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++)
+		ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++)
+		ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
+
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
+	if (!ipa3_usb_ctx->wq) {
+		pr_err("failed to create workqueue\n");
+		res = -EFAULT;
+		goto ipa_usb_workqueue_fail;
+	}
+
+	ipa_usb_debugfs_init();
+
+	pr_info("exit: IPA_USB init success!\n");
+
+	return 0;
+
+ipa_usb_workqueue_fail:
+	pr_err("init failed (%d)\n", -res);
+	kfree(ipa3_usb_ctx);
+	return res;
+}
+
+static void ipa3_usb_exit(void)
+{
+	IPA_USB_DBG_LOW("IPA_USB exit\n");
+
+	/*
+	 * Deregister for xdci lock/unlock callback from ipa core driver.
+	 * As per use case, only deregister for IPA_CONS end point for now.
+	 * If needed we can include the same for IPA_PROD ep.
+	 * For IPA_USB_DIAG/DPL config there will not be any UL config.
+	 */
+	ipa_deregister_client_callback(IPA_CLIENT_USB_PROD);
+
+	ipa_usb_debugfs_remove();
+	kfree(ipa3_usb_ctx);
+}
+
+arch_initcall(ipa3_usb_init);
+module_exit(ipa3_usb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA USB client driver");

+ 774 - 0
ipa/ipa_clients/ipa_wdi3.c

@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_wdi3.h>
+#include <linux/msm_ipa.h>
+#include <linux/string.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+#include "../ipa_v3/ipa_i.h"
+
+#define OFFLOAD_DRV_NAME "ipa_wdi"
+#define IPA_WDI_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WDI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WDI_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+struct ipa_wdi_intf_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+	struct list_head link;
+};
+
+struct ipa_wdi_context {
+	struct list_head head_intf_list;
+	struct completion wdi_completion;
+	struct mutex lock;
+	enum ipa_wdi_version wdi_version;
+	u8 is_smmu_enabled;
+	u32 tx_pipe_hdl;
+	u32 rx_pipe_hdl;
+	u8 num_sys_pipe_needed;
+	u32 sys_pipe_hdl[IPA_WDI_MAX_SUPPORTED_SYS_PIPE];
+	u32 ipa_pm_hdl;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb wdi_notify;
+#endif
+};
+
+static struct ipa_wdi_context *ipa_wdi_ctx;
+
+int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
+	struct ipa_wdi_init_out_params *out)
+{
+	struct ipa_wdi_uc_ready_params uc_ready_params;
+	struct ipa_smmu_in_params smmu_in;
+	struct ipa_smmu_out_params smmu_out;
+
+	if (!in || !out) {
+		IPA_WDI_ERR("invalid params in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (ipa_wdi_ctx) {
+		IPA_WDI_ERR("ipa_wdi_ctx was initialized before\n");
+		return -EFAULT;
+	}
+
+	if (in->wdi_version > IPA_WDI_3 || in->wdi_version < IPA_WDI_1) {
+		IPA_WDI_ERR("wrong wdi version: %d\n", in->wdi_version);
+		return -EFAULT;
+	}
+
+	ipa_wdi_ctx = kzalloc(sizeof(*ipa_wdi_ctx), GFP_KERNEL);
+	if (ipa_wdi_ctx == NULL) {
+		IPA_WDI_ERR("fail to alloc wdi ctx\n");
+		return -ENOMEM;
+	}
+	mutex_init(&ipa_wdi_ctx->lock);
+	init_completion(&ipa_wdi_ctx->wdi_completion);
+	INIT_LIST_HEAD(&ipa_wdi_ctx->head_intf_list);
+
+	ipa_wdi_ctx->wdi_version = in->wdi_version;
+	uc_ready_params.notify = in->notify;
+	uc_ready_params.priv = in->priv;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_ctx->wdi_notify = in->wdi_notify;
+#endif
+
+	if (ipa_uc_reg_rdyCB(&uc_ready_params) != 0) {
+		mutex_destroy(&ipa_wdi_ctx->lock);
+		kfree(ipa_wdi_ctx);
+		ipa_wdi_ctx = NULL;
+		return -EFAULT;
+	}
+
+	out->is_uC_ready = uc_ready_params.is_uC_ready;
+
+	smmu_in.smmu_client = IPA_SMMU_WLAN_CLIENT;
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
+		out->is_smmu_enabled = false;
+	else
+		out->is_smmu_enabled = smmu_out.smmu_enable;
+
+	ipa_wdi_ctx->is_smmu_enabled = out->is_smmu_enabled;
+
+	if (ipa3_ctx->ipa_wdi3_over_gsi)
+		out->is_over_gsi = true;
+	else
+		out->is_over_gsi = false;
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_init);
+
+int ipa_wdi_cleanup(void)
+{
+	struct ipa_wdi_intf_info *entry;
+	struct ipa_wdi_intf_info *next;
+
+	/* clear interface list */
+	list_for_each_entry_safe(entry, next,
+		&ipa_wdi_ctx->head_intf_list, link) {
+		list_del(&entry->link);
+		kfree(entry);
+	}
+	mutex_destroy(&ipa_wdi_ctx->lock);
+	kfree(ipa_wdi_ctx);
+	ipa_wdi_ctx = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_cleanup);
+
+static int ipa_wdi_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_wdi_hdr_info *hdr_info)
+{
+	int i;
+
+	if (!hdr || !hdr_info || !netdev_name) {
+		IPA_WDI_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", netdev_name);
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_WDI_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_wdi_intf_info *new_intf;
+	struct ipa_wdi_intf_info *entry;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	u32 len;
+	int ret = 0;
+
+	if (in == NULL) {
+		IPA_WDI_ERR("invalid params in=%pK\n", in);
+		return -EINVAL;
+	}
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WDI_DBG("register interface for netdev %s\n",
+		in->netdev_name);
+
+	mutex_lock(&ipa_wdi_ctx->lock);
+	list_for_each_entry(entry, &ipa_wdi_ctx->head_intf_list, link)
+		if (strcmp(entry->netdev_name, in->netdev_name) == 0) {
+			IPA_WDI_DBG("intf was added before.\n");
+			mutex_unlock(&ipa_wdi_ctx->lock);
+			return 0;
+		}
+
+	IPA_WDI_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		IPA_WDI_ERR("fail to alloc new intf\n");
+		mutex_unlock(&ipa_wdi_ctx->lock);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&new_intf->link);
+	strlcpy(new_intf->netdev_name, in->netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->hdr_len = in->hdr_info[0].hdr_len;
+
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		IPA_WDI_ERR("fail to alloc %d bytes\n", len);
+		ret = -EFAULT;
+		goto fail_alloc_hdr;
+	}
+
+	if (ipa_wdi_commit_partial_hdr(hdr, in->netdev_name, in->hdr_info)) {
+		IPA_WDI_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+		hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl);
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	else
+		tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS;
+	tx_prop[0].alt_dst_pipe = in->alt_dst_pipe;
+	tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+	strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+		sizeof(tx_prop[0].hdr_name));
+
+	tx_prop[1].ip = IPA_IP_v6;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	else
+		tx_prop[1].dst_pipe = IPA_CLIENT_WLAN2_CONS;
+	tx_prop[1].alt_dst_pipe = in->alt_dst_pipe;
+	tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+	strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+		sizeof(tx_prop[1].hdr_name));
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	else
+		rx_prop[0].src_pipe = IPA_CLIENT_WLAN2_PROD;
+	rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+	if (in->is_meta_data_valid) {
+		rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[0].attrib.meta_data = in->meta_data;
+		rx_prop[0].attrib.meta_data_mask = in->meta_data_mask;
+	}
+
+	rx_prop[1].ip = IPA_IP_v6;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	else
+		rx_prop[1].src_pipe = IPA_CLIENT_WLAN2_PROD;
+	rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+	if (in->is_meta_data_valid) {
+		rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[1].attrib.meta_data = in->meta_data;
+		rx_prop[1].attrib.meta_data_mask = in->meta_data_mask;
+	}
+
+	if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
+		IPA_WDI_ERR("fail to add interface prop\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	list_add(&new_intf->link, &ipa_wdi_ctx->head_intf_list);
+	init_completion(&ipa_wdi_ctx->wdi_completion);
+
+	kfree(hdr);
+	mutex_unlock(&ipa_wdi_ctx->lock);
+	return 0;
+
+fail_commit_hdr:
+	kfree(hdr);
+fail_alloc_hdr:
+	kfree(new_intf);
+	mutex_unlock(&ipa_wdi_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wdi_reg_intf);
+
+int ipa_wdi_dereg_intf(const char *netdev_name)
+{
+	int len, ret = 0;
+	struct ipa_ioc_del_hdr *hdr = NULL;
+	struct ipa_wdi_intf_info *entry;
+	struct ipa_wdi_intf_info *next;
+
+	if (!netdev_name) {
+		IPA_WDI_ERR("no netdev name.\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized.\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ipa_wdi_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa_wdi_ctx->head_intf_list,
+		link)
+		if (strcmp(entry->netdev_name, netdev_name) == 0) {
+			len = sizeof(struct ipa_ioc_del_hdr) +
+				2 * sizeof(struct ipa_hdr_del);
+			hdr = kzalloc(len, GFP_KERNEL);
+			if (hdr == NULL) {
+				IPA_WDI_ERR("fail to alloc %d bytes\n", len);
+				mutex_unlock(&ipa_wdi_ctx->lock);
+				return -ENOMEM;
+			}
+
+			hdr->commit = 1;
+			hdr->num_hdls = 2;
+			hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
+			hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
+			IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
+
+			if (ipa_del_hdr(hdr)) {
+				IPA_WDI_ERR("fail to delete partial header\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_deregister_intf(entry->netdev_name)) {
+				IPA_WDI_ERR("fail to del interface props\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			list_del(&entry->link);
+			kfree(entry);
+
+			break;
+		}
+
+fail:
+	kfree(hdr);
+	mutex_unlock(&ipa_wdi_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wdi_dereg_intf);
+
+static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	IPA_WDI_DBG("received pm event %d\n", event);
+}
+
+int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
+			struct ipa_wdi_conn_out_params *out)
+{
+	int i, j, ret = 0;
+	struct ipa_pm_register_params pm_params;
+	struct ipa_wdi_in_params in_tx;
+	struct ipa_wdi_in_params in_rx;
+	struct ipa_wdi_out_params out_tx;
+	struct ipa_wdi_out_params out_rx;
+
+	if (!(in && out)) {
+		IPA_WDI_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (in->num_sys_pipe_needed > IPA_WDI_MAX_SUPPORTED_SYS_PIPE) {
+		IPA_WDI_ERR("ipa can only support up to %d sys pipe\n",
+			IPA_WDI_MAX_SUPPORTED_SYS_PIPE);
+		return -EINVAL;
+	}
+	ipa_wdi_ctx->num_sys_pipe_needed = in->num_sys_pipe_needed;
+	IPA_WDI_DBG("number of sys pipe %d\n", in->num_sys_pipe_needed);
+
+	/* set up sys pipes when needed */
+	for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
+		ret = ipa_setup_sys_pipe(&in->sys_in[i],
+			&ipa_wdi_ctx->sys_pipe_hdl[i]);
+		if (ret) {
+			IPA_WDI_ERR("fail to setup sys pipe %d\n", i);
+			ret = -EFAULT;
+			goto fail_setup_sys_pipe;
+		}
+	}
+
+	memset(&pm_params, 0, sizeof(pm_params));
+	pm_params.name = "wdi";
+	pm_params.callback = ipa_wdi_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
+		IPA_WDI_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_setup_sys_pipe;
+	}
+
+	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
+		if (ipa_conn_wdi_pipes(in, out, ipa_wdi_ctx->wdi_notify)) {
+			IPA_WDI_ERR("fail to setup wdi pipes\n");
+			ret = -EFAULT;
+			goto fail_connect_pipe;
+		}
+	} else {
+		memset(&in_tx, 0, sizeof(in_tx));
+		memset(&in_rx, 0, sizeof(in_rx));
+		memset(&out_tx, 0, sizeof(out_tx));
+		memset(&out_rx, 0, sizeof(out_rx));
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+		in_rx.wdi_notify = ipa_wdi_ctx->wdi_notify;
+#endif
+		if (in->is_smmu_enabled == false) {
+			/* first set up the rx pipe */
+			in_rx.sys.ipa_ep_cfg = in->u_rx.rx.ipa_ep_cfg;
+			in_rx.sys.client = in->u_rx.rx.client;
+			in_rx.sys.notify = in->notify;
+			in_rx.sys.priv = in->priv;
+			in_rx.smmu_enabled = in->is_smmu_enabled;
+			in_rx.u.ul.rdy_ring_base_pa =
+				in->u_rx.rx.transfer_ring_base_pa;
+			in_rx.u.ul.rdy_ring_size =
+				in->u_rx.rx.transfer_ring_size;
+			in_rx.u.ul.rdy_ring_rp_pa =
+				in->u_rx.rx.transfer_ring_doorbell_pa;
+			in_rx.u.ul.rdy_comp_ring_base_pa =
+				in->u_rx.rx.event_ring_base_pa;
+			in_rx.u.ul.rdy_comp_ring_wp_pa =
+				in->u_rx.rx.event_ring_doorbell_pa;
+			in_rx.u.ul.rdy_comp_ring_size =
+				in->u_rx.rx.event_ring_size;
+			if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
+				IPA_WDI_ERR("fail to setup rx pipe\n");
+				ret = -EFAULT;
+				goto fail_connect_pipe;
+			}
+			ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
+			out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
+			IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
+
+			/* then set up the tx pipe */
+			in_tx.sys.ipa_ep_cfg = in->u_tx.tx.ipa_ep_cfg;
+			in_tx.sys.client = in->u_tx.tx.client;
+			in_tx.smmu_enabled = in->is_smmu_enabled;
+			in_tx.u.dl.comp_ring_base_pa =
+				in->u_tx.tx.transfer_ring_base_pa;
+			in_tx.u.dl.comp_ring_size =
+				in->u_tx.tx.transfer_ring_size;
+			in_tx.u.dl.ce_ring_base_pa =
+				in->u_tx.tx.event_ring_base_pa;
+			in_tx.u.dl.ce_door_bell_pa =
+				in->u_tx.tx.event_ring_doorbell_pa;
+			in_tx.u.dl.ce_ring_size =
+				in->u_tx.tx.event_ring_size;
+			in_tx.u.dl.num_tx_buffers =
+				in->u_tx.tx.num_pkt_buffers;
+			if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
+				IPA_WDI_ERR("fail to setup tx pipe\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+			ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
+			out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
+			IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
+		} else { /* smmu is enabled */
+			/* first set up the rx pipe */
+			in_rx.sys.ipa_ep_cfg = in->u_rx.rx_smmu.ipa_ep_cfg;
+			in_rx.sys.client = in->u_rx.rx_smmu.client;
+			in_rx.sys.notify = in->notify;
+			in_rx.sys.priv = in->priv;
+			in_rx.smmu_enabled = in->is_smmu_enabled;
+			in_rx.u.ul_smmu.rdy_ring =
+				in->u_rx.rx_smmu.transfer_ring_base;
+			in_rx.u.ul_smmu.rdy_ring_size =
+				in->u_rx.rx_smmu.transfer_ring_size;
+			in_rx.u.ul_smmu.rdy_ring_rp_pa =
+				in->u_rx.rx_smmu.transfer_ring_doorbell_pa;
+			in_rx.u.ul_smmu.rdy_comp_ring =
+				in->u_rx.rx_smmu.event_ring_base;
+			in_rx.u.ul_smmu.rdy_comp_ring_wp_pa =
+				in->u_rx.rx_smmu.event_ring_doorbell_pa;
+			in_rx.u.ul_smmu.rdy_comp_ring_size =
+				in->u_rx.rx_smmu.event_ring_size;
+			if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
+				IPA_WDI_ERR("fail to setup rx pipe\n");
+				ret = -EFAULT;
+				goto fail_connect_pipe;
+			}
+			ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
+			out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
+			IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
+
+			/* then set up the tx pipe */
+			in_tx.sys.ipa_ep_cfg = in->u_tx.tx_smmu.ipa_ep_cfg;
+			in_tx.sys.client = in->u_tx.tx_smmu.client;
+			in_tx.smmu_enabled = in->is_smmu_enabled;
+			in_tx.u.dl_smmu.comp_ring =
+				in->u_tx.tx_smmu.transfer_ring_base;
+			in_tx.u.dl_smmu.comp_ring_size =
+				in->u_tx.tx_smmu.transfer_ring_size;
+			in_tx.u.dl_smmu.ce_ring =
+				in->u_tx.tx_smmu.event_ring_base;
+			in_tx.u.dl_smmu.ce_door_bell_pa =
+				in->u_tx.tx_smmu.event_ring_doorbell_pa;
+			in_tx.u.dl_smmu.ce_ring_size =
+				in->u_tx.tx_smmu.event_ring_size;
+			in_tx.u.dl_smmu.num_tx_buffers =
+				in->u_tx.tx_smmu.num_pkt_buffers;
+			if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
+				IPA_WDI_ERR("fail to setup tx pipe\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+			ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
+			out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
+			IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
+		}
+	}
+
+	return 0;
+
+fail:
+	ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl);
+fail_connect_pipe:
+	ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
+
+fail_setup_sys_pipe:
+	for (j = 0; j < i; j++)
+		ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wdi_conn_pipes);
+
+int ipa_wdi_disconn_pipes(void)
+{
+	int i, ipa_ep_idx_rx, ipa_ep_idx_tx;
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	/* tear down sys pipe if needed */
+	for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
+		if (ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[i])) {
+			IPA_WDI_ERR("fail to tear down sys pipe %d\n", i);
+			return -EFAULT;
+		}
+	}
+
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
+
+	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
+		if (ipa_disconn_wdi_pipes(ipa_ep_idx_rx, ipa_ep_idx_tx)) {
+			IPA_WDI_ERR("fail to tear down wdi pipes\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to tear down wdi tx pipes\n");
+			return -EFAULT;
+		}
+		if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to tear down wdi rx pipes\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
+		IPA_WDI_ERR("fail to deregister ipa pm\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_disconn_pipes);
+
+int ipa_wdi_enable_pipes(void)
+{
+	int ret;
+	int ipa_ep_idx_tx, ipa_ep_idx_rx;
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized.\n");
+		return -EPERM;
+	}
+
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
+
+	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
+		if (ipa_enable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
+			IPA_WDI_ERR("fail to enable wdi pipes\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa_enable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to enable wdi tx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_resume_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to resume wdi tx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_enable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to enable wdi rx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_resume_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to resume wdi rx pipe\n");
+			return -EFAULT;
+		}
+	}
+
+	ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
+	if (ret) {
+		IPA_WDI_ERR("fail to activate ipa pm\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_enable_pipes);
+
+int ipa_wdi_disable_pipes(void)
+{
+	int ret;
+	int ipa_ep_idx_tx, ipa_ep_idx_rx;
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized.\n");
+		return -EPERM;
+	}
+
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
+
+	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
+		if (ipa_disable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
+			IPA_WDI_ERR("fail to disable wdi pipes\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa_suspend_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to suspend wdi tx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_disable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to disable wdi tx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_suspend_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to suspend wdi rx pipe\n");
+			return -EFAULT;
+		}
+		if (ipa_disable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
+			IPA_WDI_ERR("fail to disable wdi rx pipe\n");
+			return -EFAULT;
+		}
+	}
+
+	ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
+	if (ret) {
+		IPA_WDI_ERR("fail to deactivate ipa pm\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_disable_pipes);
+
+int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile)
+{
+	if (profile == NULL) {
+		IPA_WDI_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_wdi_ctx) {
+		IPA_WDI_ERR("wdi ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
+		profile->max_supported_bw_mbps)) {
+		IPA_WDI_ERR("fail to set pm throughput\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wdi_set_perf_profile);
+
+int ipa_wdi_create_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return ipa_create_wdi_mapping(num_buffers, info);
+}
+EXPORT_SYMBOL(ipa_wdi_create_smmu_mapping);
+
+int ipa_wdi_release_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return ipa_release_wdi_mapping(num_buffers, info);
+}
+EXPORT_SYMBOL(ipa_wdi_release_smmu_mapping);
+
+int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	return ipa_get_wdi_stats(stats);
+}
+EXPORT_SYMBOL(ipa_wdi_get_stats);
+
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return ipa_uc_bw_monitor(info);
+}
+EXPORT_SYMBOL(ipa_wdi_bw_monitor);
+
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return ipa_set_wlan_tx_info(info);
+}
+EXPORT_SYMBOL(ipa_wdi_sw_stats);
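
As a rough usage sketch of the WDI client API above (the my_wlan_bring_up
name is hypothetical, and filling in the parameter structs is assumed to
happen in the WLAN driver), the lifecycle built from these exports is:

/* Hypothetical WDI client bring-up; parameter setup elided. */
static int my_wlan_bring_up(struct ipa_wdi_init_in_params *init_in,
	struct ipa_wdi_init_out_params *init_out,
	struct ipa_wdi_reg_intf_in_params *intf,
	struct ipa_wdi_conn_in_params *conn_in,
	struct ipa_wdi_conn_out_params *conn_out)
{
	int ret;

	ret = ipa_wdi_init(init_in, init_out);	/* once, allocates ctx */
	if (ret)
		return ret;

	ret = ipa_wdi_reg_intf(intf);		/* per netdev */
	if (ret)
		goto cleanup;

	ret = ipa_wdi_conn_pipes(conn_in, conn_out);
	if (ret)
		goto dereg;

	ret = ipa_wdi_enable_pipes();		/* data path is live */
	if (ret)
		goto disconn;

	return 0;

disconn:
	ipa_wdi_disconn_pipes();
dereg:
	ipa_wdi_dereg_intf(intf->netdev_name);
cleanup:
	ipa_wdi_cleanup();
	return ret;
}

Teardown mirrors this in reverse: ipa_wdi_disable_pipes(), then
ipa_wdi_disconn_pipes(), ipa_wdi_dereg_intf() and ipa_wdi_cleanup().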

+ 2128 - 0
ipa/ipa_clients/ipa_wigig.c

@@ -0,0 +1,2128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_wigig.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define OFFLOAD_DRV_NAME "ipa_wigig"
+#define IPA_WIGIG_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_ERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa( \
+		OFFLOAD_DRV_NAME " %s:%d " fmt, __func__,\
+		__LINE__, ## args);\
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_TX_PIPE_NUM	4
+
+enum ipa_wigig_pipes_idx {
+	IPA_CLIENT_WIGIG_PROD_IDX = 0,
+	IPA_CLIENT_WIGIG1_CONS_IDX = 1,
+	IPA_CLIENT_WIGIG2_CONS_IDX = 2,
+	IPA_CLIENT_WIGIG3_CONS_IDX = 3,
+	IPA_CLIENT_WIGIG4_CONS_IDX = 4,
+	IPA_WIGIG_MAX_PIPES
+};
+
+struct ipa_wigig_intf_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 netdev_mac[IPA_MAC_ADDR_SIZE];
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+	struct list_head link;
+};
+
+struct ipa_wigig_pipe_values {
+	uint8_t dir;
+	uint8_t tx_ring_id;
+	uint32_t desc_ring_HWHEAD;
+	uint16_t desc_ring_HWHEAD_masked;
+	uint32_t desc_ring_HWTAIL;
+	uint32_t status_ring_HWHEAD;
+	uint16_t status_ring_HWHEAD_masked;
+	uint32_t status_ring_HWTAIL;
+};
+
+struct ipa_wigig_regs_save {
+	struct ipa_wigig_pipe_values pipes_val[IPA_WIGIG_MAX_PIPES];
+	u32 int_gen_tx_val;
+	u32 int_gen_rx_val;
+};
+
+struct ipa_wigig_context {
+	struct list_head head_intf_list;
+	struct mutex lock;
+	u32 ipa_pm_hdl;
+	phys_addr_t periph_baddr_pa;
+	phys_addr_t pseudo_cause_pa;
+	phys_addr_t int_gen_tx_pa;
+	phys_addr_t int_gen_rx_pa;
+	phys_addr_t dma_ep_misc_pa;
+	ipa_notify_cb tx_notify;
+	void *priv;
+	union pipes {
+		struct ipa_wigig_pipe_setup_info flat[IPA_WIGIG_MAX_PIPES];
+		struct ipa_wigig_pipe_setup_info_smmu
+			smmu[IPA_WIGIG_MAX_PIPES];
+	} pipes;
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu rx_buff_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu
+		tx_buff_smmu[IPA_WIGIG_TX_PIPE_NUM];
+	char clients_mac[IPA_WIGIG_TX_PIPE_NUM][IPA_MAC_ADDR_SIZE];
+	struct ipa_wigig_regs_save regs_save;
+	bool smmu_en;
+	bool shared_cb;
+	u8 conn_pipes;
+	struct dentry *parent;
+	struct dentry *dent_conn_clients;
+	struct dentry *dent_smmu;
+};
+
+static struct ipa_wigig_context *ipa_wigig_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+static int ipa_wigig_init_debugfs(struct dentry *parent);
+static inline void ipa_wigig_deinit_debugfs(void);
+#else
+static int ipa_wigig_init_debugfs(struct dentry *parent) { return 0; }
+static inline void ipa_wigig_deinit_debugfs(void) { }
+#endif
+
+int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out)
+{
+	struct ipa_wdi_uc_ready_params inout;
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("invalid params in=%pK, out %pK\n", in, out);
+		return -EINVAL;
+	}
+
+	IPA_WIGIG_DBG("\n");
+	if (ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("ipa_wigig_ctx was initialized before\n");
+		return -EINVAL;
+	}
+
+	ipa_wigig_ctx = kzalloc(sizeof(*ipa_wigig_ctx), GFP_KERNEL);
+	if (ipa_wigig_ctx == NULL)
+		return -ENOMEM;
+
+	mutex_init(&ipa_wigig_ctx->lock);
+	INIT_LIST_HEAD(&ipa_wigig_ctx->head_intf_list);
+
+	ipa_wigig_ctx->pseudo_cause_pa = in->pseudo_cause_pa;
+	ipa_wigig_ctx->int_gen_tx_pa = in->int_gen_tx_pa;
+	ipa_wigig_ctx->int_gen_rx_pa = in->int_gen_rx_pa;
+	ipa_wigig_ctx->dma_ep_misc_pa = in->dma_ep_misc_pa;
+	ipa_wigig_ctx->periph_baddr_pa = in->periph_baddr_pa;
+
+	IPA_WIGIG_DBG(
+		"periph_baddr_pa 0x%pa pseudo_cause_pa 0x%pa, int_gen_tx_pa 0x%pa, int_gen_rx_pa 0x%pa, dma_ep_misc_pa 0x%pa"
+		, &ipa_wigig_ctx->periph_baddr_pa,
+		&ipa_wigig_ctx->pseudo_cause_pa,
+		&ipa_wigig_ctx->int_gen_tx_pa,
+		&ipa_wigig_ctx->int_gen_rx_pa,
+		&ipa_wigig_ctx->dma_ep_misc_pa);
+
+	inout.notify = in->notify;
+	inout.priv = in->priv;
+	if (ipa_wigig_internal_init(&inout, in->int_notify,
+		&out->uc_db_pa)) {
+		kfree(ipa_wigig_ctx);
+		ipa_wigig_ctx = NULL;
+		return -EFAULT;
+	}
+
+	IPA_WIGIG_DBG("uc_db_pa 0x%pa\n", &out->uc_db_pa);
+
+	out->is_uc_ready = inout.is_uC_ready;
+
+	out->lan_rx_napi_enable = ipa_get_lan_rx_napi();
+	IPA_WIGIG_DBG("LAN RX NAPI enabled = %s\n",
+				out->lan_rx_napi_enable ? "True" : "False");
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_init);
+
+int ipa_wigig_cleanup(void)
+{
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_wigig_intf_info *next;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!ipa_wigig_ctx)
+		return -ENODEV;
+
+	/* clear interface list */
+	list_for_each_entry_safe(entry, next,
+		&ipa_wigig_ctx->head_intf_list, link) {
+		list_del(&entry->link);
+		kfree(entry);
+	}
+
+	mutex_destroy(&ipa_wigig_ctx->lock);
+
+	ipa_wigig_deinit_debugfs();
+
+	kfree(ipa_wigig_ctx);
+	ipa_wigig_ctx = NULL;
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_cleanup);
+
+bool ipa_wigig_is_smmu_enabled(void)
+{
+	struct ipa_smmu_in_params in;
+	struct ipa_smmu_out_params out;
+
+	IPA_WIGIG_DBG("\n");
+
+	in.smmu_client = IPA_SMMU_WIGIG_CLIENT;
+	ipa_get_smmu_params(&in, &out);
+
+	IPA_WIGIG_DBG("exit (%d)\n", out.smmu_enable);
+
+	return out.smmu_enable;
+}
+EXPORT_SYMBOL(ipa_wigig_is_smmu_enabled);
+
+static int ipa_wigig_init_smmu_params(void)
+{
+	struct ipa_smmu_in_params in;
+	struct ipa_smmu_out_params out;
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	in.smmu_client = IPA_SMMU_WIGIG_CLIENT;
+	ret = ipa_get_smmu_params(&in, &out);
+	if (ret) {
+		IPA_WIGIG_ERR("couldn't get SMMU params %d\n", ret);
+		return ret;
+	}
+	ipa_wigig_ctx->smmu_en = out.smmu_enable;
+	ipa_wigig_ctx->shared_cb = out.shared_cb;
+	IPA_WIGIG_DBG("SMMU (%s), 11ad CB (%s)\n",
+		out.smmu_enable ? "enabled" : "disabled",
+		out.shared_cb ? "shared" : "not shared");
+
+	return 0;
+}
+
+static int ipa_wigig_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_wigig_hdr_info *hdr_info)
+{
+	int i;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!netdev_name) {
+		IPA_WIGIG_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	IPA_WIGIG_DBG("dst_mac_addr_offset %d hdr_len %d hdr_type %d\n",
+		hdr_info->dst_mac_addr_offset,
+		hdr_info->hdr_len,
+		hdr_info->hdr_type);
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+		"%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+		"%s_ipv6", netdev_name);
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_WIGIG_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static void ipa_wigig_free_msg(void *msg, uint32_t len, uint32_t type)
+{
+	IPA_WIGIG_DBG("free msg type:%d, len:%d, buff %pK", type, len, msg);
+	kfree(msg);
+	IPA_WIGIG_DBG("exit\n");
+}
+
+static int ipa_wigig_send_wlan_msg(enum ipa_wlan_event msg_type,
+	const char *netdev_name, u8 *mac)
+{
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wlan_msg *wlan_msg;
+	int ret;
+
+	IPA_WIGIG_DBG("%d\n", msg_type);
+
+	wlan_msg = kzalloc(sizeof(*wlan_msg), GFP_KERNEL);
+	if (wlan_msg == NULL)
+		return -ENOMEM;
+	strlcpy(wlan_msg->name, netdev_name, IPA_RESOURCE_NAME_MAX);
+	memcpy(wlan_msg->mac_addr, mac, IPA_MAC_ADDR_SIZE);
+	msg_meta.msg_len = sizeof(struct ipa_wlan_msg);
+	msg_meta.msg_type = msg_type;
+
+	IPA_WIGIG_DBG("send msg type:%d, len:%d, buff %pK", msg_meta.msg_type,
+		msg_meta.msg_len, wlan_msg);
+	ret = ipa_send_msg(&msg_meta, wlan_msg, ipa_wigig_free_msg);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return ret;
+}
+
+int ipa_wigig_send_msg(int msg_type,
+	const char *netdev_name, u8 *mac,
+	enum ipa_client_type client, bool to_wigig)
+{
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wigig_msg *wigig_msg;
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	wigig_msg = kzalloc(sizeof(struct ipa_wigig_msg), GFP_KERNEL);
+	if (wigig_msg == NULL)
+		return -ENOMEM;
+	strlcpy(wigig_msg->name, netdev_name, IPA_RESOURCE_NAME_MAX);
+	memcpy(wigig_msg->client_mac_addr, mac, IPA_MAC_ADDR_SIZE);
+	if (msg_type == WIGIG_CLIENT_CONNECT)
+		wigig_msg->u.ipa_client = client;
+	else
+		wigig_msg->u.to_wigig = to_wigig;
+
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wigig_msg);
+
+	IPA_WIGIG_DBG("send msg type:%d, len:%d, buff %pK", msg_meta.msg_type,
+		msg_meta.msg_len, wigig_msg);
+	ret = ipa_send_msg(&msg_meta, wigig_msg, ipa_wigig_free_msg);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return ret;
+}
+
+static int ipa_wigig_get_devname(char *netdev_name)
+{
+	struct ipa_wigig_intf_info *entry;
+
+	mutex_lock(&ipa_wigig_ctx->lock);
+
+	if (!list_is_singular(&ipa_wigig_ctx->head_intf_list)) {
+		IPA_WIGIG_DBG("list is not singular, was an IF registered?\n");
+		mutex_unlock(&ipa_wigig_ctx->lock);
+		return -EFAULT;
+	}
+	entry = list_first_entry(&ipa_wigig_ctx->head_intf_list,
+		struct ipa_wigig_intf_info,
+		link);
+	strlcpy(netdev_name, entry->netdev_name, IPA_RESOURCE_NAME_MAX);
+
+	mutex_unlock(&ipa_wigig_ctx->lock);
+
+	return 0;
+}
+
+int ipa_wigig_reg_intf(
+	struct ipa_wigig_reg_intf_in_params *in)
+{
+	struct ipa_wigig_intf_info *new_intf;
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_ioc_del_hdr *del_hdr = NULL;
+	u32 len;
+	int ret = 0;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (in == NULL) {
+		IPA_WIGIG_ERR("invalid params in=%pK\n", in);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WIGIG_DBG(
+		"register interface for netdev %s, MAC 0x[%X][%X][%X][%X][%X][%X]\n"
+		, in->netdev_name,
+		in->netdev_mac[0], in->netdev_mac[1], in->netdev_mac[2],
+		in->netdev_mac[3], in->netdev_mac[4], in->netdev_mac[5]);
+
+	mutex_lock(&ipa_wigig_ctx->lock);
+	list_for_each_entry(entry, &ipa_wigig_ctx->head_intf_list, link)
+		if (strcmp(entry->netdev_name, in->netdev_name) == 0) {
+			IPA_WIGIG_DBG("intf was added before.\n");
+			mutex_unlock(&ipa_wigig_ctx->lock);
+			return 0;
+		}
+
+	IPA_WIGIG_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&new_intf->link);
+	strlcpy(new_intf->netdev_name, in->netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->hdr_len = in->hdr_info[0].hdr_len;
+	memcpy(new_intf->netdev_mac, in->netdev_mac, IPA_MAC_ADDR_SIZE);
+
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc_hdr;
+	}
+
+	if (ipa_wigig_commit_partial_hdr(hdr,
+		in->netdev_name,
+		in->hdr_info)) {
+		IPA_WIGIG_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	IPA_WIGIG_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+		hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl);
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	/*
+	 * for consumers, we register a default pipe, but IPACM will determine
+	 * the actual pipe according to the relevant client MAC
+	 */
+	tx_prop[0].dst_pipe = IPA_CLIENT_WIGIG1_CONS;
+	tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+	strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+		sizeof(tx_prop[0].hdr_name));
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_WIGIG1_CONS;
+	tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+	strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+		sizeof(tx_prop[1].hdr_name));
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_WIGIG_PROD;
+	rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_WIGIG_PROD;
+	rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+
+	if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
+		IPA_WIGIG_ERR("fail to add interface prop\n");
+		ret = -EFAULT;
+		goto fail_register;
+	}
+
+	if (ipa_wigig_send_wlan_msg(WLAN_AP_CONNECT,
+		in->netdev_name,
+		in->netdev_mac)) {
+		IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+		ret = -EFAULT;
+		goto fail_sendmsg;
+	}
+
+	list_add(&new_intf->link, &ipa_wigig_ctx->head_intf_list);
+
+	kfree(hdr);
+	mutex_unlock(&ipa_wigig_ctx->lock);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+fail_sendmsg:
+	ipa_deregister_intf(in->netdev_name);
+fail_register:
+	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (del_hdr) {
+		del_hdr->commit = 1;
+		del_hdr->num_hdls = 2;
+		del_hdr->hdl[0].hdl = new_intf->partial_hdr_hdl[IPA_IP_v4];
+		del_hdr->hdl[1].hdl = new_intf->partial_hdr_hdl[IPA_IP_v6];
+		ipa_del_hdr(del_hdr);
+		kfree(del_hdr);
+	}
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = 0;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = 0;
+fail_commit_hdr:
+	kfree(hdr);
+fail_alloc_hdr:
+	kfree(new_intf);
+fail:
+	mutex_unlock(&ipa_wigig_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_reg_intf);
+
+int ipa_wigig_dereg_intf(const char *netdev_name)
+{
+	int len, ret;
+	struct ipa_ioc_del_hdr *hdr = NULL;
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_wigig_intf_info *next;
+
+	if (!netdev_name) {
+		IPA_WIGIG_ERR("no netdev name\n");
+		return -EINVAL;
+	}
+
+	IPA_WIGIG_DBG("netdev %s\n", netdev_name);
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ipa_wigig_ctx->lock);
+
+	ret = -EFAULT;
+
+	list_for_each_entry_safe(entry, next, &ipa_wigig_ctx->head_intf_list,
+		link)
+		if (strcmp(entry->netdev_name, netdev_name) == 0) {
+			len = sizeof(struct ipa_ioc_del_hdr) +
+				2 * sizeof(struct ipa_hdr_del);
+			hdr = kzalloc(len, GFP_KERNEL);
+			if (hdr == NULL) {
+				mutex_unlock(&ipa_wigig_ctx->lock);
+				return -ENOMEM;
+			}
+
+			hdr->commit = 1;
+			hdr->num_hdls = 2;
+			hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
+			hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
+			IPA_WIGIG_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
+
+			if (ipa_del_hdr(hdr)) {
+				IPA_WIGIG_ERR(
+					"fail to delete partial header\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_deregister_intf(entry->netdev_name)) {
+				IPA_WIGIG_ERR("fail to del interface props\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_wigig_send_wlan_msg(WLAN_AP_DISCONNECT,
+				entry->netdev_name,
+				entry->netdev_mac)) {
+				IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			list_del(&entry->link);
+			kfree(entry);
+
+			ret = 0;
+			break;
+		}
+
+	IPA_WIGIG_DBG("exit\n");
+fail:
+	kfree(hdr);
+	mutex_unlock(&ipa_wigig_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_dereg_intf);
+
+static void ipa_wigig_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	IPA_WIGIG_DBG("received pm event %d\n", event);
+}
+
+static int ipa_wigig_store_pipe_info(struct ipa_wigig_pipe_setup_info *pipe,
+	unsigned int idx)
+{
+	IPA_WIGIG_DBG(
+		"idx %d: desc_ring HWHEAD_pa %pa, HWTAIL_pa %pa, status_ring HWHEAD_pa %pa, HWTAIL_pa %pa\n",
+		idx,
+		&pipe->desc_ring_HWHEAD_pa,
+		&pipe->desc_ring_HWTAIL_pa,
+		&pipe->status_ring_HWHEAD_pa,
+		&pipe->status_ring_HWTAIL_pa);
+
+	/* store regs */
+	ipa_wigig_ctx->pipes.flat[idx].desc_ring_HWHEAD_pa =
+		pipe->desc_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes.flat[idx].desc_ring_HWTAIL_pa =
+		pipe->desc_ring_HWTAIL_pa;
+
+	ipa_wigig_ctx->pipes.flat[idx].status_ring_HWHEAD_pa =
+		pipe->status_ring_HWHEAD_pa;
+
+	ipa_wigig_ctx->pipes.flat[idx].status_ring_HWTAIL_pa =
+		pipe->status_ring_HWTAIL_pa;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static u8 ipa_wigig_pipe_to_bit_val(int client)
+{
+	u8 shift_val;
+
+	switch (client) {
+	case IPA_CLIENT_WIGIG_PROD:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG_PROD_IDX;
+		break;
+	case IPA_CLIENT_WIGIG1_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG1_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG2_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG2_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG3_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG3_CONS_IDX;
+		break;
+	case IPA_CLIENT_WIGIG4_CONS:
+		shift_val = 0x1 << IPA_CLIENT_WIGIG4_CONS_IDX;
+		break;
+	default:
+		IPA_WIGIG_ERR("invalid pipe %d\n", client);
+		return 1;
+	}
+
+	return shift_val;
+}
+
+int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+	struct ipa_pm_register_params pm_params;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	if (ipa_wigig_init_smmu_params())
+		return -EINVAL;
+
+	if (ipa_wigig_ctx->smmu_en) {
+		IPA_WIGIG_ERR("IPA SMMU is enabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	memset(&pm_params, 0, sizeof(pm_params));
+	pm_params.name = "wigig";
+	pm_params.callback = ipa_wigig_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wigig_ctx->ipa_pm_hdl)) {
+		IPA_WIGIG_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_pm;
+	}
+	IPA_WIGIG_DBG("pm hdl %d\n", ipa_wigig_ctx->ipa_pm_hdl);
+
+	ret = ipa_wigig_uc_msi_init(true,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+	if (ret) {
+		IPA_WIGIG_ERR("failed configuring msi regs at uC\n");
+		ret = -EFAULT;
+		goto fail_msi;
+	}
+
+	if (ipa_conn_wigig_rx_pipe_i(in, out, &ipa_wigig_ctx->parent)) {
+		IPA_WIGIG_ERR("fail to connect rx pipe\n");
+		ret = -EFAULT;
+		goto fail_connect_pipe;
+	}
+
+	ipa_wigig_ctx->tx_notify = in->notify;
+	ipa_wigig_ctx->priv = in->priv;
+
+	if (ipa_wigig_ctx->parent)
+		ipa_wigig_init_debugfs(ipa_wigig_ctx->parent);
+
+	ipa_wigig_store_pipe_info(&in->pipe,
+		IPA_CLIENT_WIGIG_PROD_IDX);
+
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_connect_pipe:
+	ipa_wigig_uc_msi_init(false,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+fail_msi:
+	ipa_pm_deregister(ipa_wigig_ctx->ipa_pm_hdl);
+fail_pm:
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe);
+
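+/*
+ * Map a tx (CONS) client to its pipe index: WIGIGn_CONS -> n, leaving
+ * index 0 for IPA_CLIENT_WIGIG_PROD. Per-tx-client arrays such as
+ * tx_buff_smmu[] and clients_mac[] are therefore indexed with (idx - 1)
+ * throughout this file.
+ */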
+static int ipa_wigig_client_to_idx(enum ipa_client_type client,
+	unsigned int *idx)
+{
+	switch (client) {
+	case IPA_CLIENT_WIGIG1_CONS:
+		*idx = 1;
+		break;
+	case IPA_CLIENT_WIGIG2_CONS:
+		*idx = 2;
+		break;
+	case IPA_CLIENT_WIGIG3_CONS:
+		*idx = 3;
+		break;
+	case IPA_CLIENT_WIGIG4_CONS:
+		*idx = 4;
+		break;
+	default:
+		IPA_WIGIG_ERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipa_wigig_clean_pipe_info(unsigned int idx)
+{
+	IPA_WIGIG_DBG("cleaning pipe %d info\n", idx);
+
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("invalid index %d\n", idx);
+		return -EINVAL;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		sg_free_table(
+			&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
+		sg_free_table(
+			&ipa_wigig_ctx->pipes.smmu[idx].status_ring_base);
+
+		memset(ipa_wigig_ctx->pipes.smmu + idx,
+			0,
+			sizeof(ipa_wigig_ctx->pipes.smmu[idx]));
+	} else {
+		memset(ipa_wigig_ctx->pipes.flat + idx, 0,
+			sizeof(ipa_wigig_ctx->pipes.flat[idx]));
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
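+/*
+ * Shallow-copy an sg_table: a new table of the same size is allocated
+ * and each scatterlist entry is copied by value, so the clone refers to
+ * the caller's pages. The clone must be released with sg_free_table(),
+ * which frees only the table itself, not the underlying buffers.
+ */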
+static int ipa_wigig_clone_sg_table(struct sg_table *source,
+	struct sg_table *dst)
+{
+	struct scatterlist *next, *s, *sglist;
+	int i, nents = source->nents;
+
+	if (sg_alloc_table(dst, nents, GFP_KERNEL))
+		return -EINVAL;
+	next = dst->sgl;
+	sglist = source->sgl;
+	for_each_sg(sglist, s, nents, i) {
+		*next = *s;
+		next = sg_next(next);
+	}
+
+	dst->nents = nents;
+	dst->orig_nents = source->orig_nents;
+
+	return 0;
+}
+
+static int ipa_wigig_store_pipe_smmu_info
+	(struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu, unsigned int idx)
+{
+	int ret;
+
+	IPA_WIGIG_DBG(
+		"idx %d: desc_ring HWHEAD_pa %pa, HWTAIL_pa %pa, status_ring HWHEAD_pa %pa, HWTAIL_pa %pa, desc_ring_base 0x%llx, status_ring_base 0x%llx\n",
+		idx,
+		&pipe_smmu->desc_ring_HWHEAD_pa,
+		&pipe_smmu->desc_ring_HWTAIL_pa,
+		&pipe_smmu->status_ring_HWHEAD_pa,
+		&pipe_smmu->status_ring_HWTAIL_pa,
+		(unsigned long long)pipe_smmu->desc_ring_base_iova,
+		(unsigned long long)pipe_smmu->status_ring_base_iova);
+
+	/* store regs */
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_HWHEAD_pa =
+		pipe_smmu->desc_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_HWTAIL_pa =
+		pipe_smmu->desc_ring_HWTAIL_pa;
+
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_HWHEAD_pa =
+		pipe_smmu->status_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_HWTAIL_pa =
+		pipe_smmu->status_ring_HWTAIL_pa;
+
+	/* store rings IOVAs */
+	ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base_iova =
+		pipe_smmu->desc_ring_base_iova;
+	ipa_wigig_ctx->pipes.smmu[idx].status_ring_base_iova =
+		pipe_smmu->status_ring_base_iova;
+
+	/* copy sgt */
+	ret = ipa_wigig_clone_sg_table(
+		&pipe_smmu->desc_ring_base,
+		&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
+	if (ret)
+		goto fail_desc;
+
+	ret = ipa_wigig_clone_sg_table(
+		&pipe_smmu->status_ring_base,
+		&ipa_wigig_ctx->pipes.smmu[idx].status_ring_base);
+	if (ret)
+		goto fail_stat;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+fail_stat:
+	sg_free_table(&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base);
+	memset(&ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base, 0,
+	       sizeof(ipa_wigig_ctx->pipes.smmu[idx].desc_ring_base));
+fail_desc:
+	return ret;
+}
+
+static int ipa_wigig_get_pipe_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu, unsigned int idx)
+{
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("exceeded pipe num %d > %d\n",
+			idx, IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	*pipe_smmu = &ipa_wigig_ctx->pipes.smmu[idx];
+
+	return 0;
+}
+
+static int ipa_wigig_get_pipe_info(
+	struct ipa_wigig_pipe_setup_info **pipe, unsigned int idx)
+{
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("exceeded pipe num %d >= %d\n", idx,
+			IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	*pipe = &ipa_wigig_ctx->pipes.flat[idx];
+
+	return 0;
+}
+
+static int ipa_wigig_get_regs_addr(
+	void __iomem **desc_ring_h, void __iomem **desc_ring_t,
+	void __iomem **status_ring_h, void __iomem **status_ring_t,
+	unsigned int idx)
+{
+	struct ipa_wigig_pipe_setup_info *pipe;
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+	int ret = 0;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_DBG("exceeded pipe num %d >= %d\n", idx,
+			IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_DBG("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_DBG(
+			"must connect rx pipe before connecting any client\n");
+		return -EINVAL;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		ret = ipa_wigig_get_pipe_smmu_info(&pipe_smmu, idx);
+		if (ret)
+			return -EINVAL;
+
+		*desc_ring_h =
+			ioremap(pipe_smmu->desc_ring_HWHEAD_pa, sizeof(u32));
+		if (!*desc_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_h;
+		}
+		*desc_ring_t =
+			ioremap(pipe_smmu->desc_ring_HWTAIL_pa, sizeof(u32));
+		if (!*desc_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_t;
+		}
+		*status_ring_h =
+			ioremap(pipe_smmu->status_ring_HWHEAD_pa, sizeof(u32));
+		if (!*status_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_status_h;
+		}
+		*status_ring_t =
+			ioremap(pipe_smmu->status_ring_HWTAIL_pa, sizeof(u32));
+		if (!*status_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_status_t;
+		}
+	} else {
+		ret = ipa_wigig_get_pipe_info(&pipe, idx);
+		if (ret)
+			return -EINVAL;
+
+		*desc_ring_h = ioremap(pipe->desc_ring_HWHEAD_pa, sizeof(u32));
+		if (!*desc_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_h;
+		}
+		*desc_ring_t = ioremap(pipe->desc_ring_HWTAIL_pa, sizeof(u32));
+		if (!*desc_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap desc ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_desc_t;
+		}
+		*status_ring_h =
+			ioremap(pipe->status_ring_HWHEAD_pa, sizeof(u32));
+		if (!*status_ring_h) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring head address\n");
+			ret = -EINVAL;
+			goto fail_map_status_h;
+		}
+		*status_ring_t =
+			ioremap(pipe->status_ring_HWTAIL_pa, sizeof(u32));
+		if (!*status_ring_t) {
+			IPA_WIGIG_DBG(
+				"couldn't ioremap status ring tail address\n");
+			ret = -EINVAL;
+			goto fail_map_status_t;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_map_status_t:
+	iounmap(*status_ring_h);
+fail_map_status_h:
+	iounmap(*desc_ring_t);
+fail_map_desc_t:
+	iounmap(*desc_ring_h);
+fail_map_desc_h:
+	IPA_WIGIG_DBG("couldn't get regs information idx %d\n", idx);
+	return ret;
+}
+
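+/*
+ * Snapshot debug state for all connected WIGIG pipes: the desc/status
+ * ring HW head/tail registers (plus their masked per-ring values) and
+ * the MSI int_gen_rx/int_gen_tx registers, saved into
+ * ipa_wigig_ctx->regs_save for post-mortem inspection.
+ */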
+int ipa_wigig_save_regs(void)
+{
+	void __iomem *desc_ring_h = NULL, *desc_ring_t = NULL,
+		*status_ring_h = NULL, *status_ring_t = NULL,
+		*int_gen_rx_pa = NULL, *int_gen_tx_pa = NULL;
+	uint32_t readval;
+	u8 pipe_connected;
+	int i, ret = 0;
+
+	IPA_WIGIG_DBG("Start collecting pipes information\n");
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_ERR(
+			"must connect rx pipe before connecting any client\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < IPA_WIGIG_MAX_PIPES; i++) {
+		pipe_connected = (ipa_wigig_ctx->conn_pipes & (0x1 << i));
+		if (pipe_connected) {
+			uint32_t mask;
+			uint8_t shift;
+
+			ret = ipa_wigig_get_regs_addr(
+				&desc_ring_h, &desc_ring_t,
+				&status_ring_h, &status_ring_t, i);
+
+			if (ret) {
+				IPA_WIGIG_ERR(
+					"couldn't get registers information on client %d\n",
+					i);
+				return -EINVAL;
+			}
+
+			IPA_WIGIG_DBG("collecting pipe info of index %d\n", i);
+			if (i == IPA_CLIENT_WIGIG_PROD_IDX) {
+				ipa_wigig_ctx->regs_save.pipes_val[i].dir = 0;
+			} else {
+				ipa_wigig_ctx->regs_save.pipes_val[i].dir = 1;
+				/* TX ids start from 2 */
+				ipa_wigig_ctx->regs_save.pipes_val[i]
+					.tx_ring_id = i + 1;
+			}
+
+			readval = readl_relaxed(desc_ring_h);
+			ipa_wigig_ctx->regs_save.pipes_val[i].desc_ring_HWHEAD =
+				readval;
+			/* HWHEAD LSbs are for even IDs, MSbs for odd IDs */
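+			/*
+			 * e.g. tx_ring_id 2 -> desc HWHEAD bits [15:0],
+			 * tx_ring_id 3 -> bits [31:16]
+			 */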
+			if (i != IPA_CLIENT_WIGIG_PROD_IDX) {
+				mask = 0xFFFF0000;
+				shift = 16;
+
+				if ((ipa_wigig_ctx->regs_save.pipes_val[i]
+					.tx_ring_id % 2) == 0) {
+					mask = 0x0000FFFF;
+					shift = 0;
+				}
+				ipa_wigig_ctx->regs_save.pipes_val[i]
+					.desc_ring_HWHEAD_masked =
+					(readval & mask) >> shift;
+			}
+			readval = readl_relaxed(desc_ring_t);
+			ipa_wigig_ctx->regs_save.pipes_val[i].desc_ring_HWTAIL =
+				readval;
+			readval = readl_relaxed(status_ring_h);
+			ipa_wigig_ctx->regs_save.pipes_val[i]
+				.status_ring_HWHEAD = readval;
+			/* two status rings, MSbs for RX LSbs for TX */
+			if (i == IPA_CLIENT_WIGIG_PROD_IDX) {
+				mask = 0xFFFF0000;
+				shift = 16;
+			} else {
+				mask = 0x0000FFFF;
+				shift = 0;
+			}
+			ipa_wigig_ctx->regs_save.pipes_val[i]
+				.status_ring_HWHEAD_masked =
+				(readval & mask) >> shift;
+
+			readval = readl_relaxed(status_ring_t);
+			ipa_wigig_ctx->regs_save.pipes_val[i]
+				.status_ring_HWTAIL = readval;
+			/* unmap all regs */
+			iounmap(desc_ring_h);
+			iounmap(desc_ring_t);
+			iounmap(status_ring_h);
+			iounmap(status_ring_t);
+		}
+	}
+	int_gen_rx_pa = ioremap(ipa_wigig_ctx->int_gen_rx_pa, sizeof(u32));
+	if (!int_gen_rx_pa) {
+		IPA_WIGIG_ERR("couldn't ioremap gen rx address\n");
+		ret = -EINVAL;
+		goto fail_map_gen_rx;
+	}
+	int_gen_tx_pa = ioremap(ipa_wigig_ctx->int_gen_tx_pa, sizeof(u32));
+	if (!int_gen_tx_pa) {
+		IPA_WIGIG_ERR("couldn't ioremap gen tx address\n");
+		ret = -EINVAL;
+		goto fail_map_gen_tx;
+	}
+
+	IPA_WIGIG_DBG("collecting int_gen_rx_pa info\n");
+	readval = readl_relaxed(int_gen_rx_pa);
+	ipa_wigig_ctx->regs_save.int_gen_rx_val = readval;
+
+	IPA_WIGIG_DBG("collecting int_gen_tx_pa info\n");
+	readval = readl_relaxed(int_gen_tx_pa);
+	ipa_wigig_ctx->regs_save.int_gen_tx_val = readval;
+
+	IPA_WIGIG_DBG("Finish collecting pipes info\n");
+	IPA_WIGIG_DBG("exit\n");
+
+	iounmap(int_gen_tx_pa);
+fail_map_gen_tx:
+	iounmap(int_gen_rx_pa);
+fail_map_gen_rx:
+	return ret;
+}
+
+static void ipa_wigig_clean_rx_buff_smmu_info(void)
+{
+	IPA_WIGIG_DBG("clearing rx buff smmu info\n");
+
+	sg_free_table(&ipa_wigig_ctx->rx_buff_smmu.data_buffer_base);
+	memset(&ipa_wigig_ctx->rx_buff_smmu,
+		0,
+		sizeof(ipa_wigig_ctx->rx_buff_smmu));
+
+	IPA_WIGIG_DBG("\n");
+}
+
+static int ipa_wigig_store_rx_buff_smmu_info(
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu)
+{
+	IPA_WIGIG_DBG("\n");
+	if (ipa_wigig_clone_sg_table(&dbuff_smmu->data_buffer_base,
+		&ipa_wigig_ctx->rx_buff_smmu.data_buffer_base))
+		return -EINVAL;
+
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_base_iova =
+		dbuff_smmu->data_buffer_base_iova;
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_size =
+		dbuff_smmu->data_buffer_size;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_get_rx_buff_smmu_info(
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu **dbuff_smmu)
+{
+	IPA_WIGIG_DBG("\n");
+
+	*dbuff_smmu = &ipa_wigig_ctx->rx_buff_smmu;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_store_tx_buff_smmu_info(
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu,
+	unsigned int idx)
+{
+	int result, i;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_buff_smmu;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	tx_buff_smmu = ipa_wigig_ctx->tx_buff_smmu + idx;
+
+	tx_buff_smmu->data_buffer_base =
+		kcalloc(dbuff_smmu->num_buffers,
+			sizeof(struct sg_table),
+			GFP_KERNEL);
+	if (!tx_buff_smmu->data_buffer_base)
+		return -ENOMEM;
+
+	tx_buff_smmu->data_buffer_base_iova =
+		kcalloc(dbuff_smmu->num_buffers, sizeof(u64), GFP_KERNEL);
+	if (!tx_buff_smmu->data_buffer_base_iova) {
+		result = -ENOMEM;
+		goto fail_iova;
+	}
+
+	for (i = 0; i < dbuff_smmu->num_buffers; i++) {
+		result = ipa_wigig_clone_sg_table(
+			dbuff_smmu->data_buffer_base + i,
+			tx_buff_smmu->data_buffer_base + i);
+		if (result)
+			goto fail_sg_clone;
+
+		tx_buff_smmu->data_buffer_base_iova[i] =
+			dbuff_smmu->data_buffer_base_iova[i];
+	}
+	tx_buff_smmu->num_buffers = dbuff_smmu->num_buffers;
+	tx_buff_smmu->data_buffer_size =
+		dbuff_smmu->data_buffer_size;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+fail_sg_clone:
+	i--;
+	for (; i >= 0; i--)
+		sg_free_table(tx_buff_smmu->data_buffer_base + i);
+	kfree(tx_buff_smmu->data_buffer_base_iova);
+	tx_buff_smmu->data_buffer_base_iova = NULL;
+fail_iova:
+	kfree(tx_buff_smmu->data_buffer_base);
+	tx_buff_smmu->data_buffer_base = NULL;
+	return result;
+}
+
+static int ipa_wigig_clean_tx_buff_smmu_info(unsigned int idx)
+{
+	unsigned int i;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	dbuff_smmu = &ipa_wigig_ctx->tx_buff_smmu[idx];
+
+	if (!dbuff_smmu->data_buffer_base) {
+		IPA_WIGIG_ERR("no pa has been allocated\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < dbuff_smmu->num_buffers; i++)
+		sg_free_table(dbuff_smmu->data_buffer_base + i);
+
+	kfree(dbuff_smmu->data_buffer_base);
+	dbuff_smmu->data_buffer_base = NULL;
+
+	kfree(dbuff_smmu->data_buffer_base_iova);
+	dbuff_smmu->data_buffer_base_iova = NULL;
+
+	dbuff_smmu->data_buffer_size = 0;
+	dbuff_smmu->num_buffers = 0;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_get_tx_buff_smmu_info(
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu **dbuff_smmu,
+	unsigned int idx)
+{
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	*dbuff_smmu = &ipa_wigig_ctx->tx_buff_smmu[idx];
+
+	return 0;
+}
+
+static int ipa_wigig_store_rx_smmu_info
+	(struct ipa_wigig_conn_rx_in_params_smmu *in)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_store_pipe_smmu_info(&in->pipe_smmu,
+		IPA_CLIENT_WIGIG_PROD_IDX);
+	if (ret)
+		return ret;
+
+	if (!ipa_wigig_ctx->shared_cb) {
+		ret = ipa_wigig_store_rx_buff_smmu_info(&in->dbuff_smmu);
+		if (ret)
+			goto fail_buff;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_buff:
+	ipa_wigig_clean_pipe_info(IPA_CLIENT_WIGIG_PROD_IDX);
+	return ret;
+}
+
+static int ipa_wigig_store_client_smmu_info
+(struct ipa_wigig_conn_tx_in_params_smmu *in, enum ipa_client_type client)
+{
+	int ret;
+	unsigned int idx;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_client_to_idx(client, &idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_store_pipe_smmu_info(&in->pipe_smmu, idx);
+	if (ret)
+		return ret;
+
+	if (!ipa_wigig_ctx->shared_cb) {
+		ret = ipa_wigig_store_tx_buff_smmu_info(
+			&in->dbuff_smmu, idx - 1);
+		if (ret)
+			goto fail_buff;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_buff:
+	ipa_wigig_clean_pipe_info(idx);
+	return ret;
+}
+
+static int ipa_wigig_get_rx_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu,
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu **dbuff_smmu)
+{
+	int ret;
+
+	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu,
+		IPA_CLIENT_WIGIG_PROD_IDX);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_rx_buff_smmu_info(dbuff_smmu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ipa_wigig_get_tx_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu,
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu **dbuff_smmu,
+	enum ipa_client_type client)
+{
+	unsigned int idx;
+	int ret;
+
+	ret = ipa_wigig_client_to_idx(client, &idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu, idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_tx_buff_smmu_info(dbuff_smmu, idx - 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ipa_wigig_clean_smmu_info(enum ipa_client_type client)
+{
+	int ret;
+
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_wigig_clean_pipe_info(IPA_CLIENT_WIGIG_PROD_IDX);
+		if (ret)
+			return ret;
+		if (!ipa_wigig_ctx->shared_cb)
+			ipa_wigig_clean_rx_buff_smmu_info();
+	} else {
+		unsigned int idx;
+
+		ret = ipa_wigig_client_to_idx(client, &idx);
+		if (ret)
+			return ret;
+
+		ret = ipa_wigig_clean_pipe_info(idx);
+		if (ret)
+			return ret;
+
+		if (!ipa_wigig_ctx->shared_cb) {
+			ret = ipa_wigig_clean_tx_buff_smmu_info(idx - 1);
+			if (ret) {
+				IPA_WIGIG_ERR(
+					"cleaned tx pipe info but wasn't able to clean buff info, client %d\n"
+					, client);
+				WARN_ON(1);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int ipa_wigig_conn_rx_pipe_smmu(
+	struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+	struct ipa_pm_register_params pm_params;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	if (ipa_wigig_init_smmu_params())
+		return -EINVAL;
+
+	if (!ipa_wigig_ctx->smmu_en) {
+		IPA_WIGIG_ERR("IPA SMMU is disabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	memset(&pm_params, 0, sizeof(pm_params));
+	pm_params.name = "wigig";
+	pm_params.callback = ipa_wigig_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wigig_ctx->ipa_pm_hdl)) {
+		IPA_WIGIG_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_pm;
+	}
+
+	ret = ipa_wigig_uc_msi_init(true,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+	if (ret) {
+		IPA_WIGIG_ERR("failed configuring msi regs at uC\n");
+		ret = -EFAULT;
+		goto fail_msi;
+	}
+
+	if (ipa_conn_wigig_rx_pipe_i(in, out, &ipa_wigig_ctx->parent)) {
+		IPA_WIGIG_ERR("fail to connect rx pipe\n");
+		ret = -EFAULT;
+		goto fail_connect_pipe;
+	}
+
+	if (ipa_wigig_ctx->parent)
+		ipa_wigig_init_debugfs(ipa_wigig_ctx->parent);
+
+	if (ipa_wigig_store_rx_smmu_info(in)) {
+		IPA_WIGIG_ERR("fail to store smmu data for rx pipe\n");
+		ret = -EFAULT;
+		goto fail_smmu_store;
+	}
+
+	ipa_wigig_ctx->tx_notify = in->notify;
+	ipa_wigig_ctx->priv = in->priv;
+
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_smmu_store:
+	ipa_disconn_wigig_pipe_i(IPA_CLIENT_WIGIG_PROD,
+		&in->pipe_smmu,
+		&in->dbuff_smmu);
+fail_connect_pipe:
+	ipa_wigig_uc_msi_init(false,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+fail_msi:
+	ipa_pm_deregister(ipa_wigig_ctx->ipa_pm_hdl);
+fail_pm:
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe_smmu);
+
+int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
+{
+	IPA_WIGIG_DBG("setting throughput to %d\n", max_supported_bw_mbps);
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WIGIG_DBG("ipa_pm handle %d\n", ipa_wigig_ctx->ipa_pm_hdl);
+	if (ipa_pm_set_throughput(ipa_wigig_ctx->ipa_pm_hdl,
+		max_supported_bw_mbps)) {
+		IPA_WIGIG_ERR("fail to setup pm perf profile\n");
+		return -EFAULT;
+	}
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_set_perf_profile);
+
+static int ipa_wigig_store_client_mac(enum ipa_client_type client,
+	const char *mac)
+{
+	unsigned int idx;
+
+	if (ipa_wigig_client_to_idx(client, &idx)) {
+		IPA_WIGIG_ERR("couldn't acquire idx\n");
+		return -EFAULT;
+	}
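+	/* idx is 1-based (WIGIGn_CONS -> n); clients_mac[] is 0-based */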
+	memcpy(ipa_wigig_ctx->clients_mac[idx - 1], mac, IPA_MAC_ADDR_SIZE);
+	return 0;
+}
+
+static int ipa_wigig_get_client_mac(enum ipa_client_type client, char *mac)
+{
+	unsigned int idx;
+
+	if (ipa_wigig_client_to_idx(client, &idx)) {
+		IPA_WIGIG_ERR("couldn't acquire idx\n");
+		return -EFAULT;
+	}
+	memcpy(mac, ipa_wigig_ctx->clients_mac[idx - 1], IPA_MAC_ADDR_SIZE);
+	return 0;
+}
+
+static int ipa_wigig_clean_client_mac(enum ipa_client_type client)
+{
+	char zero_mac[IPA_MAC_ADDR_SIZE] = { 0 };
+
+	return ipa_wigig_store_client_mac(client, zero_mac);
+}
+
+int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	unsigned int idx;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_ERR(
+			"must connect rx pipe before connecting any client\n"
+		);
+		return -EINVAL;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		IPA_WIGIG_ERR("IPA SMMU is enabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	if (ipa_uc_state_check()) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return -EFAULT;
+	}
+
+	if (ipa_wigig_get_devname(dev_name)) {
+		IPA_WIGIG_ERR("couldn't get dev name\n");
+		return -EFAULT;
+	}
+
+	if (ipa_conn_wigig_client_i(in, out, ipa_wigig_ctx->tx_notify,
+		ipa_wigig_ctx->priv)) {
+		IPA_WIGIG_ERR(
+			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
+		, in->client_mac[0], in->client_mac[1], in->client_mac[2]
+		, in->client_mac[3], in->client_mac[4], in->client_mac[5]);
+		return -EFAULT;
+	}
+
+	if (ipa_wigig_client_to_idx(out->client, &idx)) {
+		IPA_WIGIG_ERR("couldn't acquire idx\n");
+		goto fail_convert_client_to_idx;
+	}
+
+	ipa_wigig_store_pipe_info(&in->pipe, idx);
+
+	if (ipa_wigig_send_msg(WIGIG_CLIENT_CONNECT,
+		dev_name,
+		in->client_mac, out->client, false)) {
+		IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+		goto fail_sendmsg;
+	}
+
+	/* update connected clients */
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(out->client);
+
+	ipa_wigig_store_client_mac(out->client, in->client_mac);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_sendmsg:
+	ipa_wigig_clean_pipe_info(idx);
+fail_convert_client_to_idx:
+	ipa_disconn_wigig_pipe_i(out->client, NULL, NULL);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_client);
+
+int ipa_wigig_conn_client_smmu(
+	struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!(ipa_wigig_ctx->conn_pipes &
+		ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD))) {
+		IPA_WIGIG_ERR(
+			"must connect rx pipe before connecting any client\n"
+		);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx->smmu_en) {
+		IPA_WIGIG_ERR("IPA SMMU is disabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	if (ipa_wigig_get_devname(netdev_name)) {
+		IPA_WIGIG_ERR("couldn't get dev name\n");
+		return -EFAULT;
+	}
+
+	if (ipa_conn_wigig_client_i(in, out, ipa_wigig_ctx->tx_notify,
+		ipa_wigig_ctx->priv)) {
+		IPA_WIGIG_ERR(
+			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
+			, in->client_mac[0], in->client_mac[1]
+			, in->client_mac[2], in->client_mac[3]
+			, in->client_mac[4], in->client_mac[5]);
+		return -EFAULT;
+	}
+
+	if (ipa_wigig_send_msg(WIGIG_CLIENT_CONNECT,
+		netdev_name,
+		in->client_mac, out->client, false)) {
+		IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+		ret = -EFAULT;
+		goto fail_sendmsg;
+	}
+
+	ret = ipa_wigig_store_client_smmu_info(in, out->client);
+	if (ret)
+		goto fail_smmu;
+
+	/* update connected clients */
+	ipa_wigig_ctx->conn_pipes |=
+		ipa_wigig_pipe_to_bit_val(out->client);
+
+	ipa_wigig_store_client_mac(out->client, in->client_mac);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_smmu:
+	/*
+	 * wigig clients are disconnected with legacy message since there is
+	 * no need to send ep, client MAC is sufficient for disconnect
+	 */
+	ipa_wigig_send_wlan_msg(WLAN_CLIENT_DISCONNECT, netdev_name,
+		in->client_mac);
+fail_sendmsg:
+	ipa_disconn_wigig_pipe_i(out->client, &in->pipe_smmu, &in->dbuff_smmu);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_client_smmu);
+
+static inline int ipa_wigig_validate_client_type(enum ipa_client_type client)
+{
+	switch (client) {
+	case IPA_CLIENT_WIGIG_PROD:
+	case IPA_CLIENT_WIGIG1_CONS:
+	case IPA_CLIENT_WIGIG2_CONS:
+	case IPA_CLIENT_WIGIG3_CONS:
+	case IPA_CLIENT_WIGIG4_CONS:
+		break;
+	default:
+		IPA_WIGIG_ERR_RL("invalid client type %d\n", client);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ipa_wigig_disconn_pipe(enum ipa_client_type client)
+{
+	int ret;
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	char client_mac[IPA_MAC_ADDR_SIZE];
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	if (client != IPA_CLIENT_WIGIG_PROD) {
+		if (ipa_wigig_get_devname(dev_name)) {
+			IPA_WIGIG_ERR("couldn't get dev name\n");
+			return -EFAULT;
+		}
+
+		if (ipa_wigig_get_client_mac(client, client_mac)) {
+			IPA_WIGIG_ERR("couldn't get client mac\n");
+			return -EFAULT;
+		}
+	}
+
+	IPA_WIGIG_DBG("disconnecting ipa_client_type %d\n", client);
+
+	if (ipa_wigig_is_smmu_enabled()) {
+		struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
+		struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;
+
+		if (client == IPA_CLIENT_WIGIG_PROD) {
+			ret = ipa_wigig_get_rx_smmu_info(&pipe_smmu,
+				&rx_dbuff_smmu);
+			if (ret)
+				return ret;
+
+			ret = ipa_disconn_wigig_pipe_i(client,
+				pipe_smmu,
+				rx_dbuff_smmu);
+		} else {
+			ret = ipa_wigig_get_tx_smmu_info(&pipe_smmu,
+				&tx_dbuff_smmu, client);
+			if (ret)
+				return ret;
+
+			ret = ipa_disconn_wigig_pipe_i(client,
+				pipe_smmu,
+				tx_dbuff_smmu);
+		}
+
+	} else {
+		ret = ipa_disconn_wigig_pipe_i(client, NULL, NULL);
+	}
+
+	if (ret) {
+		IPA_WIGIG_ERR("couldn't disconnect client %d\n", client);
+		return ret;
+	}
+
+	/* RX will be disconnected last, deinit uC msi config */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		IPA_WIGIG_DBG("Rx pipe disconnected, deIniting uc\n");
+		ret = ipa_wigig_uc_msi_init(false,
+			ipa_wigig_ctx->periph_baddr_pa,
+			ipa_wigig_ctx->pseudo_cause_pa,
+			ipa_wigig_ctx->int_gen_tx_pa,
+			ipa_wigig_ctx->int_gen_rx_pa,
+			ipa_wigig_ctx->dma_ep_misc_pa);
+		if (ret) {
+			IPA_WIGIG_ERR("failed unmapping msi regs\n");
+			WARN_ON(1);
+		}
+
+		ret = ipa_pm_deregister(ipa_wigig_ctx->ipa_pm_hdl);
+		if (ret) {
+			IPA_WIGIG_ERR("failed dereg pm\n");
+			WARN_ON(1);
+		}
+
+		ipa_wigig_ctx->conn_pipes &=
+			~ipa_wigig_pipe_to_bit_val(IPA_CLIENT_WIGIG_PROD);
+		WARN_ON(ipa_wigig_ctx->conn_pipes);
+	} else {
+		/*
+		 * wigig clients are disconnected with legacy message since
+		 * there is no need to send ep, client MAC is sufficient for
+		 * disconnect.
+		 */
+		ipa_wigig_send_wlan_msg(WLAN_CLIENT_DISCONNECT, dev_name,
+			client_mac);
+		ipa_wigig_clean_client_mac(client);
+
+		ipa_wigig_ctx->conn_pipes &=
+			~ipa_wigig_pipe_to_bit_val(client);
+	}
+	if (ipa_wigig_is_smmu_enabled())
+		ipa_wigig_clean_smmu_info(client);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_disconn_pipe);
+
+int ipa_wigig_enable_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	IPA_WIGIG_DBG("enabling pipe %d\n", client);
+
+	ret = ipa_enable_wigig_pipe_i(client);
+	if (ret)
+		return ret;
+
+	/* do only when Rx pipe is enabled */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_pm_activate_sync(ipa_wigig_ctx->ipa_pm_hdl);
+		if (ret) {
+			IPA_WIGIG_ERR("fail to activate ipa pm\n");
+			ret = -EFAULT;
+			goto fail_pm_active;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_pm_active:
+	ipa_disable_wigig_pipe_i(client);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_enable_pipe);
+
+int ipa_wigig_disable_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	ret = ipa_disable_wigig_pipe_i(client);
+	if (ret)
+		return ret;
+
+	/* do only when Rx pipe is disabled */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_pm_deactivate_sync(ipa_wigig_ctx->ipa_pm_hdl);
+		if (ret) {
+			IPA_WIGIG_ERR("fail to deactivate ipa pm\n");
+			return -EFAULT;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_disable_pipe);
+
+int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb)
+{
+	int ret;
+
+	IPA_WIGIG_DBG_LOW("\n");
+
+	ret = ipa_wigig_validate_client_type(dst);
+	if (unlikely(ret))
+		return ret;
+
+	ret = ipa_tx_dp(dst, skb, NULL);
+	if (unlikely(ret))
+		return ret;
+
+	IPA_WIGIG_DBG_LOW("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_tx_dp);
+
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MAX_MSG_LEN 4096
+
+static ssize_t ipa_wigig_read_conn_clients(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int i;
+	int nbytes = 0;
+	u8 pipe_connected;
+	char *dbg_buff;
+	ssize_t ret;
+
+	dbg_buff = kzalloc(IPA_MAX_MSG_LEN, GFP_KERNEL);
+	if (!dbg_buff)
+		return -ENOMEM;
+
+	if (!ipa_wigig_ctx) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"IPA WIGIG not initialized\n");
+		goto finish;
+	}
+
+	if (!ipa_wigig_ctx->conn_pipes) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"no WIGIG pipes connected\n");
+		goto finish;
+	}
+
+	for (i = 0; i < IPA_WIGIG_MAX_PIPES; i++) {
+		pipe_connected = (ipa_wigig_ctx->conn_pipes & (0x1 << i));
+		switch (i) {
+		case 0:
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"IPA_CLIENT_WIGIG_PROD");
+			break;
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"IPA_CLIENT_WIGIG%d_CONS",
+				i);
+			break;
+		default:
+			IPA_WIGIG_ERR("invalid pipe %d\n", i);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"invalid pipe %d",
+				i);
+			break;
+		}
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			" %s connected\n", pipe_connected ? "is" : "not");
+	}
+
+finish:
+	ret = simple_read_from_buffer(
+		ubuf, count, ppos, dbg_buff, nbytes);
+	kfree(dbg_buff);
+	return ret;
+}
+
+static ssize_t ipa_wigig_read_smmu_status(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	char *dbg_buff;
+	ssize_t ret;
+
+	dbg_buff = kzalloc(IPA_MAX_MSG_LEN, GFP_KERNEL);
+	if (!dbg_buff)
+		return -ENOMEM;
+
+	if (!ipa_wigig_ctx) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"IPA WIGIG not initialized\n");
+		goto finish;
+	}
+
+	if (ipa_wigig_ctx->smmu_en) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"SMMU enabled\n");
+
+		if (ipa_wigig_ctx->shared_cb) {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"CB shared\n");
+		} else {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"CB not shared\n");
+		}
+	} else {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"SMMU in S1 bypass\n");
+	}
+finish:
+	ret = simple_read_from_buffer(
+		ubuf, count, ppos, dbg_buff, nbytes);
+	kfree(dbg_buff);
+	return ret;
+}
+
+static const struct file_operations ipa_wigig_conn_clients_ops = {
+	.read = ipa_wigig_read_conn_clients,
+};
+
+static const struct file_operations ipa_wigig_smmu_ops = {
+	.read = ipa_wigig_read_smmu_status,
+};
+
+static inline void ipa_wigig_deinit_debugfs(void)
+{
+	debugfs_remove(ipa_wigig_ctx->dent_conn_clients);
+	debugfs_remove(ipa_wigig_ctx->dent_smmu);
+}
+
+static int ipa_wigig_init_debugfs(struct dentry *parent)
+{
+	const mode_t read_only_mode = 0444;
+
+	ipa_wigig_ctx->dent_conn_clients =
+		debugfs_create_file("conn_clients", read_only_mode, parent,
+				NULL, &ipa_wigig_conn_clients_ops);
+	if (IS_ERR_OR_NULL(ipa_wigig_ctx->dent_conn_clients)) {
+		IPA_WIGIG_ERR("fail to create file %s\n", "conn_clients");
+		goto fail_conn_clients;
+	}
+
+	ipa_wigig_ctx->dent_smmu =
+		debugfs_create_file("smmu", read_only_mode, parent, NULL,
+				&ipa_wigig_smmu_ops);
+	if (IS_ERR_OR_NULL(ipa_wigig_ctx->dent_smmu)) {
+		IPA_WIGIG_ERR("fail to create file %s\n", "smmu");
+		goto fail_smmu;
+	}
+
+	return 0;
+fail_smmu:
+	debugfs_remove(ipa_wigig_ctx->dent_conn_clients);
+fail_conn_clients:
+	return -EFAULT;
+}
+#endif

ipa/ipa_clients/odu_bridge.c

@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge"
+
+#define ODU_BRIDGE_DBG(fmt, args...) \
+	do { \
+		pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define ODU_BRIDGE_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define ODU_BRIDGE_ERR(fmt, args...) \
+	do { \
+		pr_err(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define ODU_BRIDGE_FUNC_ENTRY() \
+	ODU_BRIDGE_DBG_LOW("ENTRY\n")
+#define ODU_BRIDGE_FUNC_EXIT() \
+	ODU_BRIDGE_DBG_LOW("EXIT\n")
+
+
+#define ODU_BRIDGE_IS_QMI_ADDR(daddr) \
+	(memcmp(&(daddr), &odu_bridge_ctx->llv6_addr, sizeof((daddr))) \
+		== 0)
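+/*
+ * True when an IPv6 destination matches the link-local address set via
+ * ODU_BRIDGE_IOCTL_SET_LLV6_ADDR; bridge mode uses this to steer
+ * QMI-over-IPv6 packets to the APPS stack instead of to IPA.
+ */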
+
+#define ODU_BRIDGE_IPV4_HDR_NAME "odu_br_ipv4"
+#define ODU_BRIDGE_IPV6_HDR_NAME "odu_br_ipv6"
+
+#define IPA_ODU_SYS_DESC_FIFO_SZ 0x800
+
+#ifdef CONFIG_COMPAT
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR32 _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+				compat_uptr_t)
+#endif
+
+#define IPA_ODU_VER_CHECK() \
+	do { \
+		ret = 0;\
+		if (ipa_get_hw_type() == IPA_HW_None) { \
+			pr_err("IPA HW is unknown\n"); \
+			ret = -EFAULT; \
+		} else if (ipa_get_hw_type() < IPA_HW_v3_0) \
+			ret = 1; \
+	} while (0)
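+/*
+ * Usage sketch: IPA_ODU_VER_CHECK() expects a local int ret in scope;
+ * it leaves ret == 0 on IPA v3.0+, ret == 1 on older HW and
+ * ret == -EFAULT when the HW type is unknown:
+ *
+ *	int ret;
+ *
+ *	IPA_ODU_VER_CHECK();
+ *	if (ret < 0)
+ *		return ret;
+ */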
+
+/**
+ * struct stats - driver statistics, viewable using debugfs
+ * @num_ul_packets: number of packets bridged in uplink direction
+ * @num_dl_packets: number of packets bridged in downlink direction
+ * @num_lan_packets: number of packets bridged to APPS in bridge mode
+ */
+struct stats {
+	u64 num_ul_packets;
+	u64 num_dl_packets;
+	u64 num_lan_packets;
+};
+
+/**
+ * struct odu_bridge_ctx - ODU bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data. to be used in client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @stats: statistics, how many packets were transmitted using the SW bridge
+ * @is_connected: is the bridge connected?
+ * @is_suspended: is the bridge suspended?
+ * @mode: ODU mode (router/bridge)
+ * @lock: for the initialization, connect and disconnect synchronization
+ * @llv6_addr: link local IPv6 address of ODU network interface
+ * @odu_br_ipv4_hdr_hdl: handle for partial ipv4 ethernet header
+ * @odu_br_ipv6_hdr_hdl: handle for partial ipv6 ethernet header
+ * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
+ * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
+ * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ * @wakeup_request: client callback to wakeup
+ */
+struct odu_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 device_ethaddr[ETH_ALEN];
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	struct stats stats;
+	bool is_connected;
+	bool is_suspended;
+	enum odu_bridge_mode mode;
+	struct mutex lock;
+	struct in6_addr llv6_addr;
+	uint32_t odu_br_ipv4_hdr_hdl;
+	uint32_t odu_br_ipv6_hdr_hdl;
+	u32 odu_prod_hdl;
+	u32 odu_emb_cons_hdl;
+	u32 odu_teth_cons_hdl;
+	u32 ipa_sys_desc_size;
+	void *logbuf;
+	void *logbuf_low;
+	void (*wakeup_request)(void *cl_priv);
+	u32 pm_hdl;
+};
+static struct odu_bridge_ctx *odu_bridge_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define ODU_MAX_MSG_LEN 512
+static char dbg_buff[ODU_MAX_MSG_LEN];
+#endif
+
+static void odu_bridge_emb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		ODU_BRIDGE_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+	odu_bridge_ctx->send_dl_skb(priv, (struct sk_buff *)data);
+	odu_bridge_ctx->stats.num_dl_packets++;
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+static void odu_bridge_teth_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct ipv6hdr *ipv6hdr;
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct sk_buff *skb_copied;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		ODU_BRIDGE_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+
+	ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+	if (ipv6hdr->version == 6 &&
+	    ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+		ODU_BRIDGE_DBG_LOW("Multicast pkt, send to APPS and adapter\n");
+		skb_copied = skb_clone(skb, GFP_KERNEL);
+		if (skb_copied) {
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						IPA_RECEIVE,
+						(unsigned long) skb_copied);
+			odu_bridge_ctx->stats.num_lan_packets++;
+		} else {
+			ODU_BRIDGE_ERR("No memory\n");
+		}
+	}
+
+	odu_bridge_ctx->send_dl_skb(priv, skb);
+	odu_bridge_ctx->stats.num_dl_packets++;
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+static int odu_bridge_connect_router(void)
+{
+	struct ipa_sys_connect_params odu_prod_params;
+	struct ipa_sys_connect_params odu_emb_cons_params;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+
+	/* configure RX (ODU->IPA) EP */
+	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+	odu_prod_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	odu_prod_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+	odu_prod_params.priv = odu_bridge_ctx->priv;
+	odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+	res = ipa_setup_sys_pipe(&odu_prod_params,
+		&odu_bridge_ctx->odu_prod_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+		goto fail_odu_prod;
+	}
+
+	/* configure TX (IPA->ODU) EP */
+	odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	odu_emb_cons_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+	odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+	odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+	res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+		&odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+		goto fail_odu_emb_cons;
+	}
+
+	ODU_BRIDGE_DBG("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+
+fail_odu_emb_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+	return res;
+}
+
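+/*
+ * Bridge mode uses three system pipes: ODU_PROD for uplink,
+ * ODU_TETH_CONS for tethered downlink traffic and ODU_EMB_CONS for
+ * embedded downlink traffic destined to APPS. EP configuration is
+ * skipped on the PROD and TETH_CONS pipes (skip_ep_cfg = true).
+ */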
+static int odu_bridge_connect_bridge(void)
+{
+	struct ipa_sys_connect_params odu_prod_params;
+	struct ipa_sys_connect_params odu_emb_cons_params;
+	struct ipa_sys_connect_params odu_teth_cons_params;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+	memset(&odu_teth_cons_params, 0, sizeof(odu_teth_cons_params));
+
+	/* configure RX (ODU->IPA) EP */
+	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+	odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_prod_params.priv = odu_bridge_ctx->priv;
+	odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+	odu_prod_params.skip_ep_cfg = true;
+	res = ipa_setup_sys_pipe(&odu_prod_params,
+		&odu_bridge_ctx->odu_prod_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+		goto fail_odu_prod;
+	}
+
+	/* configure TX tethered (IPA->ODU) EP */
+	odu_teth_cons_params.client = IPA_CLIENT_ODU_TETH_CONS;
+	odu_teth_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_teth_cons_params.priv = odu_bridge_ctx->priv;
+	odu_teth_cons_params.notify = odu_bridge_teth_cons_cb;
+	odu_teth_cons_params.skip_ep_cfg = true;
+	res = ipa_setup_sys_pipe(&odu_teth_cons_params,
+		&odu_bridge_ctx->odu_teth_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_TETH_CONS %d\n",
+				res);
+		goto fail_odu_teth_cons;
+	}
+
+	/* configure TX embedded(IPA->ODU) EP */
+	odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	odu_emb_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+	odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+	res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+		&odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+		goto fail_odu_emb_cons;
+	}
+
+	ODU_BRIDGE_DBG_LOW("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+	ODU_BRIDGE_DBG_LOW("odu_teth_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_teth_cons_hdl);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+
+fail_odu_emb_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+	odu_bridge_ctx->odu_teth_cons_hdl = 0;
+fail_odu_teth_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+	return res;
+}
+
+static int odu_bridge_disconnect_router(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+	odu_bridge_ctx->odu_prod_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+	odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+}
+
+static int odu_bridge_disconnect_bridge(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+	odu_bridge_ctx->odu_prod_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU TETH CONS failed\n");
+	odu_bridge_ctx->odu_teth_cons_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+	odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+	return 0;
+}
+
+/**
+ * odu_bridge_disconnect() - Disconnect odu bridge
+ *
+ * Disconnect all pipes
+ *
+ * Return codes: 0: success, error otherwise
+ */
+int odu_bridge_disconnect(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("Not connected\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&odu_bridge_ctx->lock);
+	if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+		res = odu_bridge_disconnect_router();
+		if (res) {
+			ODU_BRIDGE_ERR("disconnect_router failed %d\n", res);
+			goto out;
+		}
+	} else {
+		res = odu_bridge_disconnect_bridge();
+		if (res) {
+			ODU_BRIDGE_ERR("disconnect_bridge failed %d\n", res);
+			goto out;
+		}
+	}
+
+	odu_bridge_ctx->is_connected = false;
+	res = 0;
+out:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_disconnect);
+
+/**
+ * odu_bridge_connect() - Connect odu bridge.
+ *
+ * Calls the mode-specific connect function to set up the IPA pipes.
+ * Return codes: 0: success
+ *		-EFAULT: bridge not initialized or already connected
+ *		other negative values: propagated from pipe setup
+ */
+int odu_bridge_connect(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("already connected\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&odu_bridge_ctx->lock);
+	if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+		res = odu_bridge_connect_router();
+		if (res) {
+			ODU_BRIDGE_ERR("connect_router failed\n");
+			goto bail;
+		}
+	} else {
+		res = odu_bridge_connect_bridge();
+		if (res) {
+			ODU_BRIDGE_ERR("connect_bridge failed\n");
+			goto bail;
+		}
+	}
+
+	odu_bridge_ctx->is_connected = true;
+	res = 0;
+bail:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_connect);
+
+/**
+ * odu_bridge_set_mode() - Set bridge mode to Router/Bridge
+ * @mode: mode to be set
+ */
+static int odu_bridge_set_mode(enum odu_bridge_mode mode)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (mode < 0 || mode >= ODU_BRIDGE_MODE_MAX) {
+		ODU_BRIDGE_ERR("Unsupported mode: %d\n", mode);
+		return -EFAULT;
+	}
+
+	ODU_BRIDGE_DBG_LOW("setting mode: %d\n", mode);
+	mutex_lock(&odu_bridge_ctx->lock);
+
+	if (odu_bridge_ctx->mode == mode) {
+		ODU_BRIDGE_DBG_LOW("same mode\n");
+		res = 0;
+		goto bail;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		/* first disconnect the old configuration */
+		if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+			res = odu_bridge_disconnect_router();
+			if (res) {
+				ODU_BRIDGE_ERR("disconnect_router failed\n");
+				goto bail;
+			}
+		} else {
+			res = odu_bridge_disconnect_bridge();
+			if (res) {
+				ODU_BRIDGE_ERR("disconnect_bridge failed\n");
+				goto bail;
+			}
+		}
+
+		/* connect the new configuration */
+		if (mode == ODU_BRIDGE_MODE_ROUTER) {
+			res = odu_bridge_connect_router();
+			if (res) {
+				ODU_BRIDGE_ERR("connect_router failed\n");
+				goto bail;
+			}
+		} else {
+			res = odu_bridge_connect_bridge();
+			if (res) {
+				ODU_BRIDGE_ERR("connect_bridge failed\n");
+				goto bail;
+			}
+		}
+	}
+	odu_bridge_ctx->mode = mode;
+	res = 0;
+bail:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * odu_bridge_set_llv6_addr() - Set link local ipv6 address
+ * @llv6_addr: odu network interface link local address
+ *
+ * This function sets the link local ipv6 address provided by IOCTL
+ */
+static int odu_bridge_set_llv6_addr(struct in6_addr *llv6_addr)
+{
+	struct in6_addr llv6_addr_host;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	llv6_addr_host.s6_addr32[0] = ntohl(llv6_addr->s6_addr32[0]);
+	llv6_addr_host.s6_addr32[1] = ntohl(llv6_addr->s6_addr32[1]);
+	llv6_addr_host.s6_addr32[2] = ntohl(llv6_addr->s6_addr32[2]);
+	llv6_addr_host.s6_addr32[3] = ntohl(llv6_addr->s6_addr32[3]);
+
+	memcpy(&odu_bridge_ctx->llv6_addr, &llv6_addr_host,
+				sizeof(odu_bridge_ctx->llv6_addr));
+	ODU_BRIDGE_DBG_LOW("LLV6 addr: %pI6c\n", &odu_bridge_ctx->llv6_addr);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+}
+
+static long odu_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	int res = 0;
+	struct in6_addr llv6_addr;
+
+	ODU_BRIDGE_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if ((_IOC_TYPE(cmd) != ODU_BRIDGE_IOC_MAGIC) ||
+	    (_IOC_NR(cmd) >= ODU_BRIDGE_IOCTL_MAX)) {
+		ODU_BRIDGE_ERR("Invalid ioctl\n");
+		return -ENOIOCTLCMD;
+	}
+
+	switch (cmd) {
+	case ODU_BRIDGE_IOC_SET_MODE:
+		ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_MODE ioctl called\n");
+		res = odu_bridge_set_mode(arg);
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			break;
+		}
+		break;
+
+	case ODU_BRIDGE_IOC_SET_LLV6_ADDR:
+		ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_LLV6_ADDR ioctl called\n");
+		res = copy_from_user(&llv6_addr,
+			(struct in6_addr *)arg,
+			sizeof(llv6_addr));
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			res = -EFAULT;
+			break;
+		}
+
+		res = odu_bridge_set_llv6_addr(&llv6_addr);
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			break;
+		}
+		break;
+
+	default:
+		ODU_BRIDGE_ERR("Unknown ioctl: %d\n", cmd);
+		WARN_ON(1);
+	}
+
+	return res;
+}
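+/*
+ * Userspace usage sketch (the device node path is illustrative, not
+ * mandated by this driver):
+ *
+ *	int fd = open("/dev/odu_ipa_bridge", O_RDWR);
+ *	struct in6_addr lladdr = { ... };
+ *
+ *	ioctl(fd, ODU_BRIDGE_IOC_SET_MODE, ODU_BRIDGE_MODE_BRIDGE);
+ *	ioctl(fd, ODU_BRIDGE_IOC_SET_LLV6_ADDR, &lladdr);
+ */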
+
+#ifdef CONFIG_COMPAT
+static long compat_odu_bridge_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ODU_BRIDGE_IOC_SET_LLV6_ADDR32:
+		cmd = ODU_BRIDGE_IOC_SET_LLV6_ADDR;
+		break;
+	case ODU_BRIDGE_IOC_SET_MODE:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return odu_bridge_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_mode;
+
+static ssize_t odu_debugfs_stats(struct file *file,
+				  char __user *ubuf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	int nbytes = 0;
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			   "UL packets: %lld\n",
+			    odu_bridge_ctx->stats.num_ul_packets);
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			   "DL packets: %lld\n",
+			    odu_bridge_ctx->stats.num_dl_packets);
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			    "LAN packets: %lld\n",
+			    odu_bridge_ctx->stats.num_lan_packets);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_write(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	enum odu_bridge_mode mode;
+
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	if (count > 0)
+		dbg_buff[count-1] = '\0';
+
+	if (strcmp(dbg_buff, "router") == 0) {
+		mode = ODU_BRIDGE_MODE_ROUTER;
+	} else if (strcmp(dbg_buff, "bridge") == 0) {
+		mode = ODU_BRIDGE_MODE_BRIDGE;
+	} else {
+		ODU_BRIDGE_ERR("Bad mode, got %s,\n"
+			 "Use <router> or <bridge>.\n", dbg_buff);
+		return count;
+	}
+
+	odu_bridge_set_mode(mode);
+	return count;
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_read(struct file *file,
+					     char __user *ubuf,
+					     size_t count,
+					     loff_t *ppos)
+{
+	int nbytes = 0;
+
+	switch (odu_bridge_ctx->mode) {
+	case ODU_BRIDGE_MODE_ROUTER:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"router\n");
+		break;
+	case ODU_BRIDGE_MODE_BRIDGE:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"bridge\n");
+		break;
+	default:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"mode error\n");
+		break;
+
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations odu_stats_ops = {
+	.read = odu_debugfs_stats,
+};
+
+const struct file_operations odu_hw_bridge_mode_ops = {
+	.read = odu_debugfs_hw_bridge_mode_read,
+	.write = odu_debugfs_hw_bridge_mode_write,
+};
+
+static void odu_debugfs_init(void)
+{
+	const mode_t read_only_mode = 0444;
+	const mode_t read_write_mode = 0666;
+
+	dent = debugfs_create_dir("odu_ipa_bridge", 0);
+	if (IS_ERR(dent)) {
+		ODU_BRIDGE_ERR("fail to create folder odu_ipa_bridge\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_only_mode, dent,
+				    0, &odu_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		ODU_BRIDGE_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	dfile_mode =
+		debugfs_create_file("mode", read_write_mode,
+				    dent, 0, &odu_hw_bridge_mode_ops);
+	if (!dfile_mode ||
+	    IS_ERR(dfile_mode)) {
+		ODU_BRIDGE_ERR("fail to create file dfile_mode\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void odu_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#else
+static void odu_debugfs_init(void) {}
+static void odu_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static const struct file_operations odu_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = odu_bridge_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_odu_bridge_ioctl,
+#endif
+};
+
+/**
+ * odu_bridge_tx_dp() - Send skb to ODU bridge
+ * @skb: skb to send
+ * @metadata: metadata on packet
+ *
+ * This function handles an uplink packet.
+ * In Router Mode:
+ *	the packet is sent directly to IPA.
+ * In Bridge Mode:
+ *	the packet is classified to decide whether it should reach the
+ *	APPS network stack:
+ *	a QMI IP packet goes to the APPS network stack only;
+ *	an IPv6 multicast packet goes to both the APPS network stack and Q6
+ *
+ * Return codes: 0- success, error otherwise
+ */
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
+{
+	struct sk_buff *skb_copied = NULL;
+	struct ipv6hdr *ipv6hdr;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	switch (odu_bridge_ctx->mode) {
+	case ODU_BRIDGE_MODE_ROUTER:
+		/* Router mode - pass skb to IPA */
+		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+		if (res) {
+			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+			goto out;
+		}
+		odu_bridge_ctx->stats.num_ul_packets++;
+		goto out;
+
+	case ODU_BRIDGE_MODE_BRIDGE:
+		ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+		if (ipv6hdr->version == 6 &&
+		    ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
+			ODU_BRIDGE_DBG_LOW("QMI packet\n");
+			skb_copied = skb_clone(skb, GFP_KERNEL);
+			if (!skb_copied) {
+				ODU_BRIDGE_ERR("No memory\n");
+				return -ENOMEM;
+			}
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_RECEIVE,
+						     (unsigned long)skb_copied);
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_WRITE_DONE,
+						     (unsigned long)skb);
+			odu_bridge_ctx->stats.num_ul_packets++;
+			odu_bridge_ctx->stats.num_lan_packets++;
+			res = 0;
+			goto out;
+		}
+
+		if (ipv6hdr->version == 6 &&
+		    ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+			ODU_BRIDGE_DBG_LOW(
+				"Multicast pkt, send to APPS and IPA\n");
+			skb_copied = skb_clone(skb, GFP_KERNEL);
+			if (!skb_copied) {
+				ODU_BRIDGE_ERR("No memory\n");
+				return -ENOMEM;
+			}
+
+			res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+			if (res) {
+				ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+				dev_kfree_skb(skb_copied);
+				goto out;
+			}
+
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_RECEIVE,
+						     (unsigned long)skb_copied);
+			odu_bridge_ctx->stats.num_ul_packets++;
+			odu_bridge_ctx->stats.num_lan_packets++;
+			goto out;
+		}
+
+		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+		if (res) {
+			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+			goto out;
+		}
+		odu_bridge_ctx->stats.num_ul_packets++;
+		goto out;
+
+	default:
+		ODU_BRIDGE_ERR("Unsupported mode: %d\n", odu_bridge_ctx->mode);
+		WARN_ON(1);
+		res = -EFAULT;
+
+	}
+out:
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_tx_dp);
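+
+/*
+ * Minimal caller sketch (not part of this driver): an ODU adapter driver
+ * would feed uplink skbs into the bridge like this. adapter_rx() is a
+ * hypothetical name, and freeing on failure assumes the skb is not
+ * consumed by odu_bridge_tx_dp() on the error path.
+ *
+ *	static void adapter_rx(struct sk_buff *skb)
+ *	{
+ *		if (odu_bridge_tx_dp(skb, NULL))
+ *			dev_kfree_skb_any(skb);
+ *	}
+ */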
+
+static int odu_bridge_add_hdrs(void)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct ethhdr *eth_ipv4;
+	struct ethhdr *eth_ipv6;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		ODU_BRIDGE_ERR("no mem\n");
+		res = -ENOMEM;
+		goto out;
+	}
+	ipv4_hdr = &hdrs->hdr[0];
+	eth_ipv4 = (struct ethhdr *)(ipv4_hdr->hdr);
+	ipv6_hdr = &hdrs->hdr[1];
+	eth_ipv6 = (struct ethhdr *)(ipv6_hdr->hdr);
+	strlcpy(ipv4_hdr->name, ODU_BRIDGE_IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv4->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+	eth_ipv4->h_proto = htons(ETH_P_IP);
+	ipv4_hdr->hdr_len = ETH_HLEN;
+	ipv4_hdr->is_partial = 1;
+	ipv4_hdr->is_eth2_ofst_valid = 1;
+	ipv4_hdr->eth2_ofst = 0;
+	strlcpy(ipv6_hdr->name, ODU_BRIDGE_IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv6->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+	eth_ipv6->h_proto = htons(ETH_P_IPV6);
+	ipv6_hdr->hdr_len = ETH_HLEN;
+	ipv6_hdr->is_partial = 1;
+	ipv6_hdr->is_eth2_ofst_valid = 1;
+	ipv6_hdr->eth2_ofst = 0;
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	res = ipa_add_hdr(hdrs);
+	if (res) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion(%d)\n", res);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		res = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		res = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	odu_bridge_ctx->odu_br_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	odu_bridge_ctx->odu_br_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	res = 0;
+out_free_mem:
+	kfree(hdrs);
+out:
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
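+
+/*
+ * Note on the headers built above: only the source MAC and EtherType are
+ * populated and is_partial is set, so the destination MAC bytes stay
+ * zeroed; the remaining bytes are expected to be patched later (e.g. by
+ * the IPA configuration manager) before the header is used for bridging.
+ */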
+
+static void odu_bridge_del_hdrs(void)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = odu_bridge_ctx->odu_br_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = odu_bridge_ctx->odu_br_ipv6_hdr_hdl;
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		ODU_BRIDGE_ERR("ipa_del_hdr failed");
+	kfree(del_hdr);
+}
+
+/**
+ * odu_bridge_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register the network interface with Tx and Rx properties.
+ * Tx properties are for data flowing from IPA to the adapter; they
+ * have Header-Insertion properties for both IPv4 and IPv6 Ethernet framing.
+ * Rx properties are for data flowing from the adapter to IPA; they have a
+ * simple rule which always "hits".
+ *
+ */
+static int odu_bridge_register_properties(void)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int res = 0;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv4_property->hdr_name, ODU_BRIDGE_IPV4_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv6_property->hdr_name, ODU_BRIDGE_IPV6_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	res = ipa_register_intf(odu_bridge_ctx->netdev_name, &tx_properties,
+		&rx_properties);
+	if (res) {
+		ODU_BRIDGE_ERR("fail on Tx/Rx properties registration %d\n",
+									res);
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return res;
+}
+
+static void odu_bridge_deregister_properties(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	res = ipa_deregister_intf(odu_bridge_ctx->netdev_name);
+	if (res)
+		ODU_BRIDGE_ERR("Fail on Tx prop deregister %d\n", res);
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+/**
+ * odu_bridge_init() - Initialize the ODU bridge driver
+ * @params: initialization parameters
+ *
+ * This function initializes all bridge internal data and registers the odu
+ * bridge with the kernel for IOCTL and debugfs.
+ * Header addition and properties are registered with the IPA driver.
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int odu_bridge_init(struct odu_bridge_params *params)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!params) {
+		ODU_BRIDGE_ERR("null pointer params\n");
+		return -EINVAL;
+	}
+	if (!params->netdev_name) {
+		ODU_BRIDGE_ERR("null pointer params->netdev_name\n");
+		return -EINVAL;
+	}
+	if (!params->tx_dp_notify) {
+		ODU_BRIDGE_ERR("null pointer params->tx_dp_notify\n");
+		return -EINVAL;
+	}
+	if (!params->send_dl_skb) {
+		ODU_BRIDGE_ERR("null pointer params->send_dl_skb\n");
+		return -EINVAL;
+	}
+	if (odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Already initialized\n");
+		return -EFAULT;
+	}
+	if (!ipa_is_ready()) {
+		ODU_BRIDGE_ERR("IPA is not ready\n");
+		return -EFAULT;
+	}
+
+	ODU_BRIDGE_DBG("device_ethaddr=%pM\n", params->device_ethaddr);
+
+	odu_bridge_ctx = kzalloc(sizeof(*odu_bridge_ctx), GFP_KERNEL);
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	odu_bridge_ctx->class = class_create(THIS_MODULE, ODU_BRIDGE_DRV_NAME);
+	if (!odu_bridge_ctx->class) {
+		ODU_BRIDGE_ERR("Class_create err.\n");
+		res = -ENODEV;
+		goto fail_class_create;
+	}
+
+	res = alloc_chrdev_region(&odu_bridge_ctx->dev_num, 0, 1,
+				  ODU_BRIDGE_DRV_NAME);
+	if (res) {
+		ODU_BRIDGE_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	odu_bridge_ctx->dev = device_create(odu_bridge_ctx->class, NULL,
+		odu_bridge_ctx->dev_num, odu_bridge_ctx, ODU_BRIDGE_DRV_NAME);
+	if (IS_ERR(odu_bridge_ctx->dev)) {
+		ODU_BRIDGE_ERR(":device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&odu_bridge_ctx->cdev, &odu_bridge_drv_fops);
+	odu_bridge_ctx->cdev.owner = THIS_MODULE;
+	odu_bridge_ctx->cdev.ops = &odu_bridge_drv_fops;
+
+	res = cdev_add(&odu_bridge_ctx->cdev, odu_bridge_ctx->dev_num, 1);
+	if (res) {
+		ODU_BRIDGE_ERR(":cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+
+	odu_debugfs_init();
+
+	strlcpy(odu_bridge_ctx->netdev_name, params->netdev_name,
+		IPA_RESOURCE_NAME_MAX);
+	odu_bridge_ctx->priv = params->priv;
+	odu_bridge_ctx->tx_dp_notify = params->tx_dp_notify;
+	odu_bridge_ctx->send_dl_skb = params->send_dl_skb;
+	memcpy(odu_bridge_ctx->device_ethaddr, params->device_ethaddr,
+		ETH_ALEN);
+	odu_bridge_ctx->ipa_sys_desc_size = params->ipa_desc_size;
+	odu_bridge_ctx->mode = ODU_BRIDGE_MODE_ROUTER;
+
+	mutex_init(&odu_bridge_ctx->lock);
+
+	res = odu_bridge_add_hdrs();
+	if (res) {
+		ODU_BRIDGE_ERR("fail on odu_bridge_add_hdr %d\n", res);
+		goto fail_add_hdrs;
+	}
+
+	res = odu_bridge_register_properties();
+	if (res) {
+		ODU_BRIDGE_ERR("fail on register properties %d\n", res);
+		goto fail_register_properties;
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+fail_register_properties:
+	odu_bridge_del_hdrs();
+fail_add_hdrs:
+	odu_debugfs_destroy();
+fail_cdev_add:
+	device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	class_destroy(odu_bridge_ctx->class);
+fail_class_create:
+	kfree(odu_bridge_ctx);
+	odu_bridge_ctx = NULL;
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_init);
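+
+/*
+ * Minimal init sketch (hypothetical caller; callback names and values
+ * below are placeholders):
+ *
+ *	static struct odu_bridge_params params = {
+ *		.netdev_name	= "odu0",
+ *		.priv		= adapter_ctx,
+ *		.tx_dp_notify	= adapter_tx_dp_notify,
+ *		.send_dl_skb	= adapter_send_dl_skb,
+ *		.device_ethaddr	= {0x02, 0x00, 0x00, 0x12, 0x34, 0x56},
+ *		.ipa_desc_size	= 2048,
+ *	};
+ *
+ *	if (odu_bridge_init(&params))
+ *		goto fail;
+ */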
+
+/**
+ * odu_bridge_cleanup() - De-Initialize the ODU bridge driver
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int odu_bridge_cleanup(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("cannot deinit while bridge is conncetd\n");
+		return -EFAULT;
+	}
+
+	odu_bridge_deregister_properties();
+	odu_bridge_del_hdrs();
+	odu_debugfs_destroy();
+	cdev_del(&odu_bridge_ctx->cdev);
+	device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+	unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+	class_destroy(odu_bridge_ctx->class);
+	ipc_log_context_destroy(odu_bridge_ctx->logbuf);
+	ipc_log_context_destroy(odu_bridge_ctx->logbuf_low);
+	kfree(odu_bridge_ctx);
+	odu_bridge_ctx = NULL;
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(odu_bridge_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ODU bridge driver");

+ 2440 - 0
ipa/ipa_clients/rndis_ipa.c

@@ -0,0 +1,2440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/debugfs.h>
+#include <linux/in.h>
+#include <linux/stddef.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/ipa.h>
+#include <linux/random.h>
+#include <linux/rndis_ipa.h>
+#include <linux/workqueue.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define CREATE_TRACE_POINTS
+#include "rndis_ipa_trace.h"
+
+#define DRV_NAME "RNDIS_IPA"
+#define DEBUGFS_DIR_NAME "rndis_ipa"
+#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
+#define NETDEV_NAME "rndis"
+#define IPV4_HDR_NAME "rndis_eth_ipv4"
+#define IPV6_HDR_NAME "rndis_eth_ipv6"
+#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
+#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+#define RNDIS_IPA_PKT_TYPE 0x00000001
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define FROM_IPA_TO_USB_BAMDMA 4
+#define FROM_USB_TO_IPA_BAMDMA 5
+#define BAM_DMA_MAX_PKT_NUMBER 10
+#define BAM_DMA_DATA_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER * \
+			(ETH_FRAME_LEN + sizeof(struct rndis_pkt_hdr)))
+#define BAM_DMA_DESC_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER * (sizeof(struct sps_iovec)))
+#define TX_TIMEOUT (5 * HZ)
+#define MIN_TX_ERROR_SLEEP_PERIOD 500
+#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */
+#define DEFAULT_AGGR_PKT_LIMIT 0
+
+#define IPA_RNDIS_IPC_LOG_PAGES 50
+
+#define IPA_RNDIS_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
+
+static void *ipa_rndis_logbuf;
+
+#define RNDIS_IPA_DEBUG(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa_rndis_logbuf) { \
+			IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define RNDIS_IPA_DEBUG_XMIT(fmt, args...) \
+	pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define RNDIS_IPA_ERROR(fmt, args...) \
+	do { \
+		pr_err(DRV_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args);\
+		if (ipa_rndis_logbuf) { \
+			IPA_RNDIS_IPC_LOGGING(ipa_rndis_logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define NULL_CHECK_RETVAL(ptr) \
+		do { \
+			if (!(ptr)) { \
+				RNDIS_IPA_ERROR("null pointer " #ptr "\n"); \
+				ret = -EINVAL; \
+			} \
+		} \
+		while (0)
+
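+/*
+ * Note: NULL_CHECK_RETVAL() does not return from the caller; it only sets
+ * the caller's local 'ret' to -EINVAL. Callers must declare 'int ret',
+ * initialize it to 0 and check it right after the macro, as done
+ * throughout this file.
+ */
+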
+#define RNDIS_HDR_OFST(field) offsetof(struct rndis_pkt_hdr, field)
+#define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n")
+#define RNDIS_IPA_LOG_EXIT()  RNDIS_IPA_DEBUG("end\n")
+
+/**
+ * enum rndis_ipa_state - specify the current driver internal state
+ *  which is guarded by a state machine.
+ *
+ * The driver internal state changes due to its external API usage.
+ * The driver saves its internal state to guard against illegal call
+ * sequences by the caller.
+ * states:
+ * UNLOADED is the first state, which is the default one and is also the
+ *  state after the driver gets unloaded (cleanup).
+ * INITIALIZED is the driver state once it has finished registering
+ *  the network device and all internal data structs were initialized.
+ * CONNECTED is the driver state once the USB pipes were connected to IPA.
+ * UP is the driver state after the interface mode was set to UP but the
+ *  pipes are not connected yet - this is a meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipes were connected and
+ *  the interface got an UP request from the network stack. This is the
+ *  driver's idle operation state which allows it to transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum rndis_ipa_state {
+	RNDIS_IPA_UNLOADED          = 0,
+	RNDIS_IPA_INITIALIZED       = 1,
+	RNDIS_IPA_CONNECTED         = 2,
+	RNDIS_IPA_UP                = 3,
+	RNDIS_IPA_CONNECTED_AND_UP  = 4,
+	RNDIS_IPA_INVALID           = 5,
+};
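+
+/*
+ * Rough transition sketch, read from the state descriptions above (the
+ * authoritative table is rndis_ipa_next_state()):
+ *
+ *	UNLOADED         --initialize--> INITIALIZED
+ *	INITIALIZED      --connect-----> CONNECTED
+ *	INITIALIZED      --open--------> UP
+ *	CONNECTED        --open--------> CONNECTED_AND_UP
+ *	UP               --connect-----> CONNECTED_AND_UP
+ *	CONNECTED_AND_UP --stop--------> CONNECTED
+ *	CONNECTED_AND_UP --disconnect--> UP
+ */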
+
+/**
+ * enum rndis_ipa_operation - enumerations used to describe the API operation
+ *
+ * Those enums are used as input for the driver state machine.
+ */
+enum rndis_ipa_operation {
+	RNDIS_IPA_INITIALIZE,
+	RNDIS_IPA_CONNECT,
+	RNDIS_IPA_OPEN,
+	RNDIS_IPA_STOP,
+	RNDIS_IPA_DISCONNECT,
+	RNDIS_IPA_CLEANUP,
+};
+
+#define RNDIS_IPA_STATE_DEBUG(ctx) \
+	RNDIS_IPA_DEBUG("Driver state: %s\n",\
+	rndis_ipa_state_string((ctx)->state))
+
+
+/**
+ * struct rndis_ipa_dev - main driver context parameters
+ *
+ * @net: network interface struct implemented by this driver
+ * @directory: debugfs directory for various debugging switches
+ * @tx_filter: flag that enable/disable Tx path to continue to IPA
+ * @tx_dropped: number of filtered out Tx packets
+ * @tx_dump_enable: dump all Tx packets
+ * @rx_filter: flag that enable/disable Rx path to continue to IPA
+ * @rx_dropped: number of filtered out Rx packets
+ * @rx_dump_enable: dump all Rx packets
+ * @icmp_filter: allow all ICMP packet to pass through the filters
+ * @deaggregation_enable: enable/disable IPA HW deaggregation logic
+ * @during_xmit_error: flags that indicate that the driver is in a middle
+ *  of error handling in Tx path
+ * @directory: holds all debug flags used by the driver to allow cleanup
+ *  for driver unload
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: save handle for IPA pipe operations
+ * @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets which shall cause
+ *  to netdev queue start (after stopped due to outstanding_high reached)
+ * @error_msec_sleep_time: number of msec for sleeping in case of Tx error
+ * @state: current state of the driver
+ * @host_ethaddr: holds the tethered PC ethernet address
+ * @device_ethaddr: holds the device ethernet address
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the Netdev internal
+ * state is changed to RNDIS_IPA_CONNECTED_AND_UP
+ * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails
+ * @state_lock: used to protect the state variable.
+ * @pm_hdl: handle for IPA PM framework
+ * @is_vlan_mode: should driver work in vlan mode?
+ * @netif_rx_function: holds the correct network stack API, needed for NAPI
+ */
+struct rndis_ipa_dev {
+	struct net_device *net;
+	bool tx_filter;
+	u32 tx_dropped;
+	bool tx_dump_enable;
+	bool rx_filter;
+	u32 rx_dropped;
+	bool rx_dump_enable;
+	bool icmp_filter;
+	bool deaggregation_enable;
+	bool during_xmit_error;
+	struct dentry *directory;
+	u32 eth_ipv4_hdr_hdl;
+	u32 eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+	atomic_t outstanding_pkts;
+	u32 outstanding_high;
+	u32 outstanding_low;
+	u32 error_msec_sleep_time;
+	enum rndis_ipa_state state;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void (*device_ready_notify)(void);
+	struct delayed_work xmit_error_delayed_work;
+	spinlock_t state_lock; /* Spinlock for the state variable.*/
+	u32 pm_hdl;
+	bool is_vlan_mode;
+	int (*netif_rx_function)(struct sk_buff *skb);
+};
+
+/**
+ * rndis_pkt_hdr - RNDIS_IPA representation of REMOTE_NDIS_PACKET_MSG
+ * @msg_type: for REMOTE_NDIS_PACKET_MSG this value should be 1
+ * @msg_len:  total message length in bytes, including RNDIS header and payload
+ * @data_ofst: offset in bytes from the start of this field to the payload
+ * @data_len: payload size in bytes
+ * @zeroes: OOB place holder - not used for RNDIS_IPA.
+ */
+struct rndis_pkt_hdr {
+	__le32	msg_type;
+	__le32	msg_len;
+	__le32	data_ofst;
+	__le32	data_len;
+	__le32  zeroes[7];
+} __packed;
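+
+/*
+ * Byte layout of struct rndis_pkt_hdr (11 little-endian u32s, 44 bytes
+ * total), which the endpoint configs below reference by offset:
+ *
+ *	offset  0: msg_type  (0x00000001 for REMOTE_NDIS_PACKET_MSG)
+ *	offset  4: msg_len   (44 + Ethernet frame length)
+ *	offset  8: data_ofst (36, from the template below)
+ *	offset 12: data_len  (Ethernet frame length)
+ *	offset 16: zeroes[7] (28 unused OOB bytes)
+ */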
+
+static int rndis_ipa_open(struct net_device *net);
+static void rndis_ipa_packet_receive_notify
+	(void *private, enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_complete_notify
+	(void *private, enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_timeout(struct net_device *net);
+static int rndis_ipa_stop(struct net_device *net);
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
+	struct rndis_ipa_dev *rndis_ipa_ctx);
+static void rndis_ipa_xmit_error(struct sk_buff *skb);
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work);
+static void rndis_ipa_prepare_header_insertion
+	(int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode);
+static int rndis_ipa_hdrs_cfg
+	(struct rndis_ipa_dev *rndis_ipa_ctx,
+	const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
+static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode);
+static int rndis_ipa_deregister_properties(char *netdev_name);
+static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static netdev_tx_t rndis_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net);
+static int rndis_ipa_debugfs_atomic_open
+	(struct inode *inode, struct file *file);
+static int rndis_ipa_debugfs_aggr_open
+	(struct inode *inode, struct file *file);
+static ssize_t rndis_ipa_debugfs_aggr_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_atomic_read
+	(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static void rndis_ipa_dump_skb(struct sk_buff *skb);
+static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_ep_registers_cfg
+	(u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev,
+	u32 max_xfer_size_bytes_to_host, u32 mtu,
+	bool deaggr_enable,
+	bool is_vlan_mode);
+static int rndis_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr,
+	u8 device_ethaddr[]);
+static enum rndis_ipa_state rndis_ipa_next_state
+	(enum rndis_ipa_state current_state,
+	enum rndis_ipa_operation operation);
+static const char *rndis_ipa_state_string(enum rndis_ipa_state state);
+
+static struct rndis_ipa_dev *rndis_ipa;
+
+static const struct net_device_ops rndis_ipa_netdev_ops = {
+	.ndo_open		= rndis_ipa_open,
+	.ndo_stop		= rndis_ipa_stop,
+	.ndo_start_xmit = rndis_ipa_start_xmit,
+	.ndo_tx_timeout = rndis_ipa_tx_timeout,
+	.ndo_get_stats = rndis_ipa_get_stats,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+static const struct file_operations rndis_ipa_debugfs_atomic_ops = {
+	.open = rndis_ipa_debugfs_atomic_open,
+	.read = rndis_ipa_debugfs_atomic_read,
+};
+
+static const struct file_operations rndis_ipa_aggr_ops = {
+		.open = rndis_ipa_debugfs_aggr_open,
+		.write = rndis_ipa_debugfs_aggr_write,
+};
+
+static struct ipa_ep_cfg ipa_to_usb_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = ETH_HLEN,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_AGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 4,
+		.aggr_time_limit = DEFAULT_AGGR_TIME_LIMIT,
+		.aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = 0,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_SRC_NAT,
+	},
+};
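+
+/*
+ * Offset note for the config above, tied to the rndis_pkt_hdr layout:
+ * hdr_ofst_pkt_size = 3 * sizeof(u32) = 12 is the data_len field and
+ * hdr_total_len_or_pad_offset = 1 * sizeof(u32) = 4 is the msg_len field,
+ * so IPA patches the payload length and total message length into the
+ * inserted template header on the IPA->USB path (our reading of the cfg;
+ * see the IPA ep cfg documentation for the authoritative semantics).
+ */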
+
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_dis = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32) +
+			sizeof(struct rndis_pkt_hdr),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = true,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+
+	.aggr = {
+		.aggr_en = IPA_BYPASS_AGGR,
+		.aggr = 0,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit  = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = false,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
+
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_en = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN,
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = true,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_DEAGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit  = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = sizeof(struct rndis_pkt_hdr),
+		.packet_offset_valid = true,
+		.packet_offset_location = 8,
+		.max_packet_len = 8192, /* Will be overridden*/
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
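+
+/*
+ * Deaggregation note for the config above: packet_offset_location = 8 is
+ * the byte offset of the data_ofst field inside struct rndis_pkt_hdr, so
+ * the HW can locate each packet's payload, while max_packet_len is only a
+ * placeholder that is expected to be overridden per connection (see the
+ * "Will be overridden" comment above).
+ */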
+
+/**
+ * rndis_template_hdr - RNDIS template structure for RNDIS_IPA SW insertion
+ * @msg_type: set for REMOTE_NDIS_PACKET_MSG (0x00000001)
+ *  this value will be used for all data packets
+ * @msg_len:  will add the skb length to get final size
+ * @data_ofst: this field value will not be changed
+ * @data_len: set as skb length to get final size
+ * @zeroes: make sure all OOB data is not used
+ */
+static struct rndis_pkt_hdr rndis_template_hdr = {
+	.msg_type = RNDIS_IPA_PKT_TYPE,
+	.msg_len = sizeof(struct rndis_pkt_hdr),
+	.data_ofst = sizeof(struct rndis_pkt_hdr) - RNDIS_HDR_OFST(data_ofst),
+	.data_len = 0,
+	.zeroes = {0},
+};
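+
+/*
+ * Worked example, based on the template field descriptions above: for an
+ * Ethernet frame of 1514 bytes, the SW-encapsulated header would carry
+ * msg_len = 44 + 1514 = 1558 and data_len = 1514, while msg_type and
+ * data_ofst (36) keep their template values.
+ */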
+
+static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * rndis_ipa_init() - create network device and initialize internal
+ *  data structures
+ * @params: in/out parameters required for initialization,
+ *  see "struct ipa_usb_init_params" for more details
+ *
+ * Shall be called prior to pipe connection.
+ * Detailed description:
+ *  - allocate the network device
+ *  - set default values for driver internal switches and stash them inside
+ *     the netdev private field
+ *  - set needed headroom for RNDIS header
+ *  - create debugfs folder and files
+ *  - create IPA resource manager client
+ *  - set the ethernet address for the netdev to be added on SW Tx path
+ *  - add header insertion rules for IPA driver (based on host/device Ethernet
+ *     addresses given in input params and on RNDIS data template struct)
+ *  - register tx/rx properties to IPA driver (will be later used
+ *    by IPA configuration manager to configure rest of the IPA rules)
+ *  - set the carrier state to "off" (until connect is called)
+ *  - register the network device
+ *  - set the out parameters
+ *  - change driver internal state to INITIALIZED
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+	int result = 0;
+	struct net_device *net;
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	int ret;
+
+	RNDIS_IPA_LOG_ENTRY();
+	RNDIS_IPA_DEBUG("%s initializing\n", DRV_NAME);
+	ret = 0;
+	NULL_CHECK_RETVAL(params);
+	if (ret)
+		return ret;
+
+	RNDIS_IPA_DEBUG
+		("host_ethaddr=%pM, device_ethaddr=%pM\n",
+		params->host_ethaddr,
+		params->device_ethaddr);
+
+	net = alloc_etherdev(sizeof(struct rndis_ipa_dev));
+	if (!net) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to allocate Ethernet device\n");
+		goto fail_alloc_etherdev;
+	}
+	RNDIS_IPA_DEBUG("network device was successfully allocated\n");
+
+	rndis_ipa_ctx = netdev_priv(net);
+	if (!rndis_ipa_ctx) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to extract netdev priv\n");
+		goto fail_netdev_priv;
+	}
+	memset(rndis_ipa_ctx, 0, sizeof(*rndis_ipa_ctx));
+	RNDIS_IPA_DEBUG("rndis_ipa_ctx (private)=%pK\n", rndis_ipa_ctx);
+
+	spin_lock_init(&rndis_ipa_ctx->state_lock);
+
+	rndis_ipa_ctx->net = net;
+	rndis_ipa_ctx->tx_filter = false;
+	rndis_ipa_ctx->rx_filter = false;
+	rndis_ipa_ctx->icmp_filter = true;
+	rndis_ipa_ctx->tx_dropped = 0;
+	rndis_ipa_ctx->rx_dropped = 0;
+	rndis_ipa_ctx->tx_dump_enable = false;
+	rndis_ipa_ctx->rx_dump_enable = false;
+	rndis_ipa_ctx->deaggregation_enable = false;
+	rndis_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	rndis_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+	memcpy
+		(rndis_ipa_ctx->device_ethaddr, params->device_ethaddr,
+		sizeof(rndis_ipa_ctx->device_ethaddr));
+	memcpy
+		(rndis_ipa_ctx->host_ethaddr, params->host_ethaddr,
+		sizeof(rndis_ipa_ctx->host_ethaddr));
+	INIT_DELAYED_WORK
+		(&rndis_ipa_ctx->xmit_error_delayed_work,
+		rndis_ipa_xmit_error_aftercare_wq);
+	rndis_ipa_ctx->error_msec_sleep_time =
+		MIN_TX_ERROR_SLEEP_PERIOD;
+	RNDIS_IPA_DEBUG("internal data structures were set\n");
+
+	if (!params->device_ready_notify)
+		RNDIS_IPA_DEBUG("device_ready_notify() was not supplied\n");
+	rndis_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+	snprintf(net->name, sizeof(net->name), "%s%%d", NETDEV_NAME);
+	RNDIS_IPA_DEBUG
+		("Setting network interface driver name to: %s\n",
+		net->name);
+
+	net->netdev_ops = &rndis_ipa_netdev_ops;
+	net->watchdog_timeo = TX_TIMEOUT;
+
+	net->needed_headroom = sizeof(rndis_template_hdr);
+	RNDIS_IPA_DEBUG
+		("Needed headroom for RNDIS header set to %d\n",
+		net->needed_headroom);
+
+	rndis_ipa_debugfs_init(rndis_ipa_ctx);
+
+	result = rndis_ipa_set_device_ethernet_addr
+		(net->dev_addr, rndis_ipa_ctx->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+	if (ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS,
+		&rndis_ipa_ctx->is_vlan_mode)) {
+		RNDIS_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n");
+		goto fail_get_vlan_mode;
+	}
+
+	RNDIS_IPA_DEBUG("is_vlan_mode %d\n", rndis_ipa_ctx->is_vlan_mode);
+
+	result = rndis_ipa_hdrs_cfg
+			(rndis_ipa_ctx,
+			params->host_ethaddr,
+			params->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ipa hdrs set\n");
+		goto fail_hdrs_cfg;
+	}
+	RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n");
+
+	result = rndis_ipa_register_properties(net->name,
+		rndis_ipa_ctx->is_vlan_mode);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on properties set\n");
+		goto fail_register_tx;
+	}
+	RNDIS_IPA_DEBUG("2 TX and 2 RX properties were registered\n");
+
+	netif_carrier_off(net);
+	RNDIS_IPA_DEBUG("set carrier off until pipes are connected\n");
+
+	result = register_netdev(net);
+	if (result) {
+		RNDIS_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	RNDIS_IPA_DEBUG
+		("netdev:%s registration succeeded, index=%d\n",
+		net->name, net->ifindex);
+
+	if (ipa_get_lan_rx_napi()) {
+		rndis_ipa_ctx->netif_rx_function = netif_receive_skb;
+		RNDIS_IPA_DEBUG("LAN RX NAPI enabled = True");
+	} else {
+		rndis_ipa_ctx->netif_rx_function = netif_rx_ni;
+		RNDIS_IPA_DEBUG("LAN RX NAPI enabled = False");
+	}
+
+	rndis_ipa = rndis_ipa_ctx;
+	params->ipa_rx_notify = rndis_ipa_packet_receive_notify;
+	params->ipa_tx_notify = rndis_ipa_tx_complete_notify;
+	params->private = rndis_ipa_ctx;
+	params->skip_ep_cfg = false;
+	rndis_ipa_ctx->state = RNDIS_IPA_INITIALIZED;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+	pr_info("RNDIS_IPA NetDev was initialized\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_register_netdev:
+	rndis_ipa_deregister_properties(net->name);
+fail_register_tx:
+	rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+fail_hdrs_cfg:
+fail_get_vlan_mode:
+fail_set_device_ethernet:
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+fail_netdev_priv:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_init);
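+
+/*
+ * Minimal caller sketch (hypothetical gadget-side code; names such as
+ * host_mac, dev_mac and gadget_ready_cb are placeholders):
+ *
+ *	struct ipa_usb_init_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
+ *	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
+ *	params.device_ready_notify = gadget_ready_cb;
+ *	if (rndis_ipa_init(&params))
+ *		goto fail;
+ *
+ * On success, params.ipa_rx_notify, params.ipa_tx_notify and
+ * params.private are filled by the driver for use at pipe-connect time.
+ */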
+
+/**
+ * rndis_ipa_pipe_connect_notify() - notify rndis_ipa Netdev that the USB pipes
+ *  were connected
+ * @usb_to_ipa_hdl: handle from IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle from IPA driver client for IPA->USB
+ * @max_xfer_size_bytes_to_dev: RNDIS protocol specific, the maximum
+ *  transfer size, in bytes, in the device direction
+ * @max_packet_number_to_dev: RNDIS protocol specific, the maximum packet
+ *  number expected in the device direction
+ * @max_xfer_size_bytes_to_host: RNDIS protocol specific, the maximum
+ *  transfer size, in bytes, that the host expects
+ * @private: same value that was set by init(), this parameter holds the
+ *  network device pointer.
+ *
+ * Once USB driver finishes the pipe connection between IPA core
+ * and USB core this method shall be called in order to
+ * allow the driver to complete the data path configurations.
+ * Detailed description:
+ *  - configure the IPA end-points register
+ *  - notify the Linux kernel for "carrier_on"
+ *  - change the driver internal state
+ *
+ *  After this function is done the driver state changes to "Connected" or
+ *  "Connected and Up".
+ *  This API is expected to be called after initialization() or
+ *  after a call to disconnect().
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_pipe_connect_notify(
+	u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_packet_number_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int result;
+	int ret;
+	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	ret = 0;
+	NULL_CHECK_RETVAL(private);
+
+	if (ret)
+		return ret;
+
+	RNDIS_IPA_DEBUG
+		("usb_to_ipa_hdl=%d, ipa_to_usb_hdl=%d, private=0x%pK\n",
+		usb_to_ipa_hdl, ipa_to_usb_hdl, private);
+	RNDIS_IPA_DEBUG
+		("max_xfer_sz_to_dev=%d, max_pkt_num_to_dev=%d\n",
+		max_xfer_size_bytes_to_dev,
+		max_packet_number_to_dev);
+	RNDIS_IPA_DEBUG
+		("max_xfer_sz_to_host=%d\n",
+		max_xfer_size_bytes_to_host);
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_CONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n");
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	if (usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR
+			("usb_to_ipa_hdl(%d) - not valid ipa handle\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (ipa_to_usb_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR
+			("ipa_to_usb_hdl(%d) - not valid ipa handle\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+
+	result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on PM register\n");
+		goto fail_register_pm;
+	}
+	RNDIS_IPA_DEBUG("PM client was registered\n");
+
+	rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+	if (max_packet_number_to_dev > 1)
+		rndis_ipa_ctx->deaggregation_enable = true;
+	else
+		rndis_ipa_ctx->deaggregation_enable = false;
+	result = rndis_ipa_ep_registers_cfg
+		(usb_to_ipa_hdl,
+		ipa_to_usb_hdl,
+		max_xfer_size_bytes_to_dev,
+		max_xfer_size_bytes_to_host,
+		rndis_ipa_ctx->net->mtu,
+		rndis_ipa_ctx->deaggregation_enable,
+		rndis_ipa_ctx->is_vlan_mode);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ep cfg\n");
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("end-points configured\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue() was called\n");
+
+	netif_carrier_on(rndis_ipa_ctx->net);
+	if (!netif_carrier_ok(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_ERROR("netif_carrier_ok error\n");
+		result = -EBUSY;
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("netif_carrier_on() was called\n");
+
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg) {
+		result = -ENOMEM;
+		goto fail;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_CONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	result = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (result) {
+		RNDIS_IPA_ERROR("fail to send ECM_CONNECT for rndis\n");
+		kfree(rndis_msg);
+		goto fail;
+	}
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+					  RNDIS_IPA_CONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n");
+		return -EPERM;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after open()\n");
+
+	pr_info("RNDIS_IPA NetDev pipes were connected\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail:
+	rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
+fail_register_pm:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify);
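+
+/*
+ * Sketch of the expected pipe-connect call from the USB driver once both
+ * pipes are up (handle and size values are placeholders; 'params' is the
+ * struct filled by rndis_ipa_init() above):
+ *
+ *	rndis_ipa_pipe_connect_notify(usb_to_ipa_hdl, ipa_to_usb_hdl,
+ *				      max_xfer_to_dev, max_pkts_to_dev,
+ *				      max_xfer_to_host, params.private);
+ */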
+
+/**
+ * rndis_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ * The driver internal state shall be changed to Up or Connected and Up
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_open(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	int next_state;
+	unsigned long flags;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	rndis_ipa_ctx = netdev_priv(net);
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_OPEN);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't bring driver up before initialize\n");
+		return -EPERM;
+	}
+
+	rndis_ipa_ctx->state = next_state;
+
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after connect()\n");
+
+	pr_info("RNDIS_IPA NetDev was opened\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ *  using SW path (Tx data path)
+ * Tx path for this Netdev is Apps-processor->IPA->USB
+ * @skb: packet received from Linux network stack destined for tethered PC
+ * @net: the network device being used to send this packet (rndis0)
+ *
+ * Several conditions must be met in order to send the packet to IPA:
+ * - The transmit queue for the network driver is currently
+ *   in "started" state
+ * - The driver internal state is Connected and Up
+ * - The Tx filter switch is turned off
+ * - The IPA resource manager state for the driver producer client
+ *   is "Granted", which implies that all the resources in the dependency
+ *   graph are valid for data flow
+ * - The outstanding high boundary was not reached
+ *
+ * In case the outstanding packets high boundary is reached, the driver will
+ * stop the send queue until enough packets are processed by
+ * the IPA core (based on calls to rndis_ipa_tx_complete_notify).
+ *
+ * In case all of the conditions are met, the network driver shall:
+ *  - encapsulate the Ethernet packet with RNDIS header (REMOTE_NDIS_PACKET_MSG)
+ *  - send the packet by using IPA Driver SW path (IP_PACKET_INIT)
+ *  - Netdev status fields shall be updated based on the current Tx packet
+ *
+ * Returns NETDEV_TX_BUSY if retry should be made later,
+ * or NETDEV_TX_OK on success.
+ */
+static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+
+	netif_trans_update(net);
+
+	RNDIS_IPA_DEBUG_XMIT
+		("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely(netif_queue_stopped(net))) {
+		RNDIS_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+
+	if (unlikely(rndis_ipa_ctx->tx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR("Missing pipe connected and/or iface up\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(tx_filter(skb))) {
+		dev_kfree_skb_any(skb);
+		RNDIS_IPA_DEBUG("packet got filtered out on Tx path\n");
+		rndis_ipa_ctx->tx_dropped++;
+		status = NETDEV_TX_OK;
+		goto out;
+	}
+
+	ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
+	if (ret) {
+		RNDIS_IPA_DEBUG("Failed activate PM client\n");
+		netif_stop_queue(net);
+		goto fail_pm_activate;
+	}
+
+	if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >=
+				rndis_ipa_ctx->outstanding_high) {
+		RNDIS_IPA_DEBUG("Outstanding high boundary reached (%d)\n",
+				rndis_ipa_ctx->outstanding_high);
+		netif_stop_queue(net);
+		RNDIS_IPA_DEBUG("send  queue was stopped\n");
+		status = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
+	trace_rndis_tx_dp(skb->protocol);
+	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
+	if (ret) {
+		RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+
+	atomic_inc(&rndis_ipa_ctx->outstanding_pkts);
+
+	status = NETDEV_TX_OK;
+	goto out;
+
+fail_tx_packet:
+	rndis_ipa_xmit_error(skb);
+out:
+	ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
+fail_pm_activate:
+	RNDIS_IPA_DEBUG
+		("packet Tx done - %s\n",
+		(status == NETDEV_TX_OK) ? "OK" : "FAIL");
+
+	return status;
+}
+
+/**
+ * rndis_ipa_tx_complete_notify() - notification for Netdev that the
+ *  last packet was successfully sent
+ * @private: driver context stashed by IPA driver upon pipe connect
+ * @evt: event type (expected to be write-done event)
+ * @data: data provided with event (this is actually the skb that
+ *  holds the sent packet)
+ *
+ * This function will be called in bottom-half deferred interrupt context.
+ * The outstanding packets counter shall be decremented.
+ * The network stack send queue will be re-started in case the low
+ * outstanding boundary is reached and the queue was stopped before.
+ * At the end the skb shall be freed.
+ */
+static void rndis_ipa_tx_complete_notify(
+	void *private,
+	enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int ret;
+
+	ret = 0;
+	NULL_CHECK_RETVAL(private);
+	if (ret)
+		return;
+
+	trace_rndis_status_rcvd(skb->protocol);
+
+	RNDIS_IPA_DEBUG
+		("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely((evt != IPA_WRITE_DONE))) {
+		RNDIS_IPA_ERROR("unsupported event on TX call-back\n");
+		return;
+	}
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG
+		("dropping Tx-complete pkt, state=%s\n",
+		rndis_ipa_state_string(rndis_ipa_ctx->state));
+		goto out;
+	}
+
+	rndis_ipa_ctx->net->stats.tx_packets++;
+	rndis_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) > 0)
+		atomic_dec(&rndis_ipa_ctx->outstanding_pkts);
+
+	if
+		(netif_queue_stopped(rndis_ipa_ctx->net) &&
+		netif_carrier_ok(rndis_ipa_ctx->net) &&
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts) <
+					(rndis_ipa_ctx->outstanding_low)) {
+		RNDIS_IPA_DEBUG("outstanding low boundary reached (%d)n",
+				rndis_ipa_ctx->outstanding_low);
+		netif_wake_queue(rndis_ipa_ctx->net);
+		RNDIS_IPA_DEBUG("send queue was awaken\n");
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
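+
+/*
+ * Flow-control summary: with the defaults above, the Tx queue is stopped
+ * once DEFAULT_OUTSTANDING_HIGH (64) packets are in flight and woken only
+ * when completions drop the count below DEFAULT_OUTSTANDING_LOW (32),
+ * giving hysteresis instead of toggling around a single threshold.
+ */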
+
+static void rndis_ipa_tx_timeout(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+	int outstanding = atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	RNDIS_IPA_ERROR
+		("possible IPA stall was detected, %d outstanding\n",
+		outstanding);
+
+	net->stats.tx_errors++;
+}
+
+/**
+ * rndis_ipa_packet_receive_notify() - Rx notify for packets sent from the
+ *  tethered PC (USB->IPA).
+ *  Rx path for this Netdev is USB->IPA->Apps-processor.
+ * @private: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Once IPA driver receives a packet from USB client this callback will be
+ * called from bottom-half interrupt handling context (ipa Rx workqueue).
+ *
+ * Packets that shall be sent to Apps processor may be of two types:
+ * 1) Packets that are destined for Apps (e.g: WEBSERVER running on Apps)
+ * 2) Exception packets that need special handling (based on IPA core
+ *    configuration, e.g: new TCP session or any other packets that IPA core
+ *    can't handle)
+ * If the following conditions are met, the packet shall be sent up to the
+ * Linux network stack:
+ *  - Driver internal state is Connected and Up
+ *  - Notification received from IPA driver meets the expected type
+ *    for an Rx packet
+ *  - The Rx filter switch is turned off
+ *
+ * Prior to the sending to the network stack:
+ *  - Netdev struct shall be stashed to the skb as required by the network stack
+ *  - Ethernet header shall be removed (skb->data shall point to the Ethernet
+ *     payload, Ethernet still stashed under MAC header).
+ *  - The skb->pkt_protocol shall be set based on the ethernet destination
+ *     address, Can be Broadcast, Multicast or Other-Host, The later
+ *     pkt-types packets shall be dropped in case the Netdev is not
+ *     in  promisc mode.
+ *   - Set the skb protocol field based on the EtherType field
+ *
+ * Netdev status fields shall be updated based on the current Rx packet
+ */
+static void rndis_ipa_packet_receive_notify(
+		void *private,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int result;
+	unsigned int packet_len = skb->len;
+
+	RNDIS_IPA_DEBUG
+		("packet Rx, len=%d\n",
+		skb->len);
+
+	if (unlikely(rndis_ipa_ctx->rx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG("use connect()/up() before receive()\n");
+		RNDIS_IPA_DEBUG("packet dropped (length=%d)\n",
+				skb->len);
+		return;
+	}
+
+	if (evt != IPA_RECEIVE)	{
+		RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n");
+		return;
+	}
+
+	if (!rndis_ipa_ctx->deaggregation_enable)
+		skb_pull(skb, sizeof(struct rndis_pkt_hdr));
+
+	skb->dev = rndis_ipa_ctx->net;
+	skb->protocol = eth_type_trans(skb, rndis_ipa_ctx->net);
+
+	if (rx_filter(skb)) {
+		RNDIS_IPA_DEBUG("packet got filtered out on RX path\n");
+		rndis_ipa_ctx->rx_dropped++;
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	trace_rndis_netif_ni(skb->protocol);
+	result = rndis_ipa_ctx->netif_rx_function(skb);
+	if (unlikely(result))
+		RNDIS_IPA_ERROR("fail on netif_rx_function\n");
+	rndis_ipa_ctx->net->stats.rx_packets++;
+	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
+}
+
+/** rndis_ipa_stop() - notify the network interface to stop
+ *   sending/receiving data
+ *  @net: the network device being stopped.
+ *
+ * This API is used by the Linux network stack to notify the network driver
+ * that its state was changed to "down".
+ * The driver will stop the "send" queue and change its internal
+ * state to "Connected".
+ * The Netdev shall be returned to "Up" after rndis_ipa_open().
+ */
+static int rndis_ipa_stop(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+	int next_state;
+	unsigned long flags;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_STOP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_DEBUG("can't do network interface down without up\n");
+		return -EPERM;
+	}
+
+	rndis_ipa_ctx->state = next_state;
+
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	netif_stop_queue(net);
+	pr_info("RNDIS_IPA NetDev queue is stopped\n");
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/** rndis_ipa_pipe_disconnect_notify() - notify rndis_ipa Netdev that the
+ *   USB pipes were disconnected
+ * @private: same value that was set by init(), this parameter holds the
+ *  network device pointer.
+ *
+ * USB shall notify the Netdev after disconnecting the pipes.
+ * - The internal driver state shall return to its previous
+ *   state (Up or Initialized).
+ * - The Linux network stack shall be informed of carrier off to notify
+ *   user space of the pipe disconnect.
+ * - The send queue shall be stopped.
+ * During the transition between the pipe disconnection and the Netdev
+ * notification, packets are expected to be dropped by the IPA driver or
+ * the IPA core.
+ */
+int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int outstanding_dropped_pkts;
+	int retval;
+	int ret;
+	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	ret = 0;
+	NULL_CHECK_RETVAL(rndis_ipa_ctx);
+	if (ret)
+		return ret;
+	RNDIS_IPA_DEBUG("private=0x%pK\n", private);
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_DISCONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	if (rndis_ipa_ctx->during_xmit_error) {
+		RNDIS_IPA_DEBUG("canceling xmit-error delayed work\n");
+		cancel_delayed_work_sync(
+			&rndis_ipa_ctx->xmit_error_delayed_work);
+		rndis_ipa_ctx->during_xmit_error = false;
+	}
+
+	netif_carrier_off(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("carrier_off notification was sent\n");
+
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg)
+		return -ENOMEM;
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_DISCONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (retval) {
+		RNDIS_IPA_ERROR("fail to send ECM_DISCONNECT for rndis\n");
+		kfree(rndis_msg);
+		return -EPERM;
+	}
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("queue stopped\n");
+
+	outstanding_dropped_pkts =
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+
+	retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to deregister PM\n");
+		return retval;
+	}
+	RNDIS_IPA_DEBUG("PM was successfully deregistered\n");
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+					  RNDIS_IPA_DISCONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	pr_info("RNDIS_IPA NetDev pipes disconnected (%d outstanding clr)\n",
+		outstanding_dropped_pkts);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_disconnect_notify);
+
+/**
+ * rndis_ipa_cleanup() - unregister the network interface driver and free
+ *  internal data structs.
+ * @private: same value that was set by init(), this
+ *   parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g: when the USB composition does not support it.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ *  - remove header-insertion headers from IPA core
+ *  - delete the driver dependency defined for IPA resource manager and
+ *    destroy the producer resource
+ *  - remove the debugfs entries
+ *  - deregister the network interface from the Linux network stack
+ *  - free all internal data structs
+ *
+ * It is assumed that no packets shall be sent through HW bridging
+ * during cleanup, to avoid packets trying to add a header that is
+ * removed during cleanup (the IPA configuration manager should have
+ * removed them at this point)
+ */
+void rndis_ipa_cleanup(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int ret;
+	unsigned long flags;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("private=0x%pK\n", private);
+
+	ret = 0;
+	NULL_CHECK_RETVAL(rndis_ipa_ctx);
+	if (ret)
+		return;
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_CLEANUP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use disconnect()before clean()\n");
+		return;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	ret = rndis_ipa_deregister_properties(rndis_ipa_ctx->net->name);
+	if (ret) {
+		RNDIS_IPA_ERROR("Fail to deregister Tx/Rx properties\n");
+		return;
+	}
+	RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n");
+
+	ret = rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+	if (ret)
+		RNDIS_IPA_ERROR(
+			"Failed removing RNDIS headers from IPA core. Continue anyway\n");
+	else
+		RNDIS_IPA_DEBUG("RNDIS headers were removed from IPA core\n");
+
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+	RNDIS_IPA_DEBUG("debugfs remove was done\n");
+
+	unregister_netdev(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netdev unregistered\n");
+
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+					  RNDIS_IPA_CLEANUP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use disconnect()before clean()\n");
+		return;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+	free_netdev(rndis_ipa_ctx->net);
+	pr_info("RNDIS_IPA NetDev was cleaned\n");
+
+	RNDIS_IPA_LOG_EXIT();
+}
+EXPORT_SYMBOL(rndis_ipa_cleanup);
+
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (rndis_ipa_ctx->device_ready_notify) {
+		rndis_ipa_ctx->device_ready_notify();
+		RNDIS_IPA_DEBUG("USB device_ready_notify() was called\n");
+	} else {
+		RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n");
+	}
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+}
+
+static void rndis_ipa_xmit_error(struct sk_buff *skb)
+{
+	bool retval;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	unsigned long delay_jiffies;
+	u8 rand_delay_msec;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("starting Tx-queue backoff\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue was called\n");
+
+	skb_pull(skb, sizeof(rndis_template_hdr));
+	rndis_ipa_ctx->net->stats.tx_errors++;
+
+	get_random_bytes(&rand_delay_msec, sizeof(rand_delay_msec));
+	delay_jiffies = msecs_to_jiffies(
+		rndis_ipa_ctx->error_msec_sleep_time + rand_delay_msec);
+
+	retval = schedule_delayed_work(
+		&rndis_ipa_ctx->xmit_error_delayed_work, delay_jiffies);
+	if (!retval) {
+		RNDIS_IPA_ERROR("fail to schedule delayed work\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG
+			("work scheduled to start Tx-queue in %d msec\n",
+			rndis_ipa_ctx->error_msec_sleep_time +
+			rand_delay_msec);
+		rndis_ipa_ctx->during_xmit_error = true;
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	struct delayed_work *delayed_work;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("Starting queue after xmit error\n");
+
+	delayed_work = to_delayed_work(work);
+	rndis_ipa_ctx = container_of
+		(delayed_work, struct rndis_ipa_dev,
+		xmit_error_delayed_work);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR
+			("error aftercare handling in bad state (%d)",
+			rndis_ipa_ctx->state);
+		return;
+	}
+
+	rndis_ipa_ctx->during_xmit_error = false;
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_prepare_header_insertion() - prepare the header insertion request
+ *  for IPA driver
+ * @eth_type: the Ethernet type for this header-insertion header
+ * @hdr_name: string that shall represent this header in the IPA database
+ * @add_hdr: output for caller to be used with ipa_add_hdr() to configure
+ *  the IPA core
+ * @dst_mac: tethered PC MAC (Ethernet) address to be added to packets
+ *  for the IPA->USB pipe
+ * @src_mac: device MAC (Ethernet) address to be added to packets
+ *  for the IPA->USB pipe
+ * @is_vlan_mode: should the driver work in VLAN mode?
+ *
+ * This function shall build the header-insertion block request for a
+ * single Ethernet+RNDIS header.
+ * This header shall be inserted for packets processed by IPA
+ * and destined for the USB client.
+ * This header shall be used for HW bridging of packets destined for
+ * the tethered PC.
+ * For the SW data-path, this header won't be used.
+ */
+static void rndis_ipa_prepare_header_insertion(
+	int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac, bool is_vlan_mode)
+{
+	struct ethhdr *eth_hdr;
+	struct vlan_ethhdr *eth_vlan_hdr;
+
+	add_hdr->hdr_len = sizeof(rndis_template_hdr);
+	add_hdr->is_partial = false;
+	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
+
+	memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr));
+	add_hdr->is_eth2_ofst_valid = true;
+	add_hdr->eth2_ofst = sizeof(rndis_template_hdr);
+
+	if (is_vlan_mode) {
+		eth_vlan_hdr = (struct vlan_ethhdr *)(add_hdr->hdr +
+			sizeof(rndis_template_hdr));
+		memcpy(eth_vlan_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_vlan_hdr->h_source, src_mac, ETH_ALEN);
+		eth_vlan_hdr->h_vlan_encapsulated_proto = htons(eth_type);
+		eth_vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+		add_hdr->hdr_len += VLAN_ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_802_1Q;
+	} else {
+		eth_hdr = (struct ethhdr *)(add_hdr->hdr +
+			sizeof(rndis_template_hdr));
+		memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+		memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+		eth_hdr->h_proto = htons(eth_type);
+		add_hdr->hdr_len += ETH_HLEN;
+		add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	}
+}
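+
+/*
+ * Layout sketch of the resulting header-insertion block (illustrative,
+ * not part of the original source), non-VLAN case:
+ *
+ *	add_hdr->hdr:  [ rndis_template_hdr ][ struct ethhdr ]
+ *	               |<---- eth2_ofst ---->|<-- ETH_HLEN -->|
+ *
+ * so hdr_len = sizeof(rndis_template_hdr) + ETH_HLEN; in VLAN mode the
+ * Ethernet part is a struct vlan_ethhdr (VLAN_ETH_HLEN bytes) with
+ * h_vlan_proto = ETH_P_8021Q instead.
+ */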
+
+/**
+ * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core
+ *  to allow HW bridging
+ * @rndis_ipa_ctx: main driver context
+ * @dst_mac: destination MAC address (tethered PC)
+ * @src_mac: source MAC address (MDM device)
+ *
+ * This function shall add 2 headers:
+ * one header for IPv4 and one header for IPv6.
+ * Both headers shall contain an Ethernet header and an RNDIS header; the
+ * only difference shall be in the EtherType field.
+ * Headers will be committed to HW.
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_hdrs_cfg(
+	struct rndis_ipa_dev *rndis_ipa_ctx,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	hdrs = kzalloc
+		(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+		GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+
+	ipv4_hdr = &hdrs->hdr[0];
+	ipv6_hdr = &hdrs->hdr[1];
+	rndis_ipa_prepare_header_insertion
+		(ETH_P_IP, IPV4_HDR_NAME,
+		ipv4_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
+	rndis_ipa_prepare_header_insertion
+		(ETH_P_IPV6, IPV6_HDR_NAME,
+		ipv6_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto fail_add_hdr;
+	}
+	if (ipv4_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto fail_add_hdr;
+	}
+	if (ipv6_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto fail_add_hdr;
+	}
+	rndis_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	rndis_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	RNDIS_IPA_LOG_EXIT();
+
+fail_add_hdr:
+	kfree(hdrs);
+fail_mem:
+	return result;
+}
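+
+/*
+ * Allocation sketch (illustrative, not part of the original source):
+ * struct ipa_ioc_add_hdr ends with a flexible array of struct
+ * ipa_hdr_add entries, so the single kzalloc() above covers the
+ * command plus both entries, reachable as hdrs->hdr[0] (IPv4) and
+ * hdrs->hdr[1] (IPv6) with num_hdrs = 2.
+ */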
+
+/**
+ * rndis_ipa_hdrs_destroy() - remove the IPA core configuration done for
+ *  the driver data path bridging.
+ * @rndis_ipa_ctx: the driver context
+ *
+ * Revert the work done by rndis_ipa_hdrs_cfg(), that is,
+ * remove the 2 Ethernet+RNDIS headers.
+ */
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return -ENOMEM;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl;
+
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		RNDIS_IPA_ERROR("ipa_del_hdr failed\n");
+	else
+		RNDIS_IPA_DEBUG("hdrs deletion done\n");
+
+	kfree(del_hdr);
+	return result;
+}
+
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+/**
+ * rndis_ipa_register_properties() - set Tx/Rx properties needed
+ *  by IPA configuration manager
+ * @netdev_name: a string with the name of the network interface device
+ * @is_vlan_mode: should driver work in vlan mode?
+ *
+ * Register Tx/Rx properties to allow user space configuration (IPA
+ * Configuration Manager):
+ *
+ * - Two Tx properties (IPA->USB): specify the header names and pipe number
+ *   that shall be used by user space for header-addition configuration
+ *   for ipv4/ipv6 packets flowing from IPA to USB for HW bridging data.
+ *   That header-addition header is added by the Netdev and used by user
+ *   space to close the HW bridge by adding filtering and routing rules
+ *   that point to this header.
+ *
+ * - Two Rx properties (USB->IPA): these properties shall be used by user space
+ *   to configure the IPA core to identify the packets destined
+ *   for Apps-processor by configuring the unicast rules destined for
+ *   the Netdev IP address.
+ *   These rules shall be added based on the attribute mask supplied to
+ *   this function, that is, an always-hit rule.
+ */
+static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	enum ipa_hdr_l2_type hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (is_vlan_mode)
+		hdr_l2_type = IPA_HDR_L2_802_1Q;
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy
+		(ipv4_property->hdr_name, IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = hdr_l2_type;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy
+		(ipv6_property->hdr_name, IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv6_property->hdr_l2_type = hdr_l2_type;
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv4_property->hdr_l2_type = hdr_l2_type;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("rndis0", &tx_properties, &rx_properties);
+	if (result)
+		RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n");
+	else
+		RNDIS_IPA_DEBUG("Tx/Rx properties registration done\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return result;
+}
+
+/**
+ * rndis_ipa_deregister_properties() - remove the 2 Tx and 2 Rx properties
+ * @netdev_name: a string with the name of the network interface device
+ *
+ * This function reverts the work done by rndis_ipa_register_properties().
+ */
+static int rndis_ipa_deregister_properties(char *netdev_name)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_deregister_intf(netdev_name);
+	if (result) {
+		RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n");
+		return result;
+	}
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = p;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (event != IPA_PM_CLIENT_ACTIVATED) {
+		RNDIS_IPA_ERROR("unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+	RNDIS_IPA_DEBUG("Resource Granted\n");
+
+	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_DEBUG("starting queue\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG("queue already awake\n");
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+
+	pm_reg.name = rndis_ipa_ctx->net->name;
+	pm_reg.user_data = rndis_ipa_ctx;
+	pm_reg.callback = rndis_ipa_pm_cb;
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &rndis_ipa_ctx->pm_hdl);
+	if (result) {
+		RNDIS_IPA_ERROR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	ipa_pm_deactivate_sync(rndis_ipa_ctx->pm_hdl);
+	ipa_pm_deregister(rndis_ipa_ctx->pm_hdl);
+	rndis_ipa_ctx->pm_hdl = ~0;
+	return 0;
+}
+
+/**
+ * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
+ *  an RNDIS header
+ * @skb: packet to be encapsulated with the RNDIS header
+ * @rndis_ipa_ctx: main driver context
+ *
+ * Shall use a template header for RNDIS and update it with the given
+ * skb values.
+ * The packet is expected to already be encapsulated with an Ethernet header.
+ */
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
+	struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct rndis_pkt_hdr *rndis_hdr;
+	int payload_byte_len = skb->len;
+
+	/* if there is no room in this skb, allocate a new one */
+	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
+		struct sk_buff *new_skb = skb_copy_expand(skb,
+			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
+		if (!new_skb) {
+			RNDIS_IPA_ERROR("no memory for skb expand\n");
+			return skb;
+		}
+		RNDIS_IPA_DEBUG("skb expanded. old %pK new %pK\n",
+			skb, new_skb);
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+	}
+
+	if (rndis_ipa_ctx->is_vlan_mode)
+		if (unlikely(skb->protocol != htons(ETH_P_8021Q)))
+			RNDIS_IPA_DEBUG(
+				"ether_type != ETH_P_8021Q && vlan, prot = 0x%X\n"
+				, skb->protocol);
+
+	/* make room at the head of the SKB to put the RNDIS header */
+	rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb,
+					sizeof(rndis_template_hdr));
+
+	memcpy(rndis_hdr, &rndis_template_hdr, sizeof(*rndis_hdr));
+	rndis_hdr->msg_len +=  payload_byte_len;
+	rndis_hdr->data_len +=  payload_byte_len;
+
+	return skb;
+}
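+
+/*
+ * Worked example of the length accounting above (illustrative only):
+ * payload_byte_len is captured as skb->len before skb_push(), so for
+ * a 1514-byte Ethernet frame both msg_len and data_len in the copied
+ * template grow by 1514, and the resulting skb carries
+ * sizeof(rndis_template_hdr) + 1514 bytes in total.
+ */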
+
+/**
+ * rx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent up to the network stack
+ *
+ * This function shall do Rx packet filtering on the Netdev level.
+ */
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+
+	return rndis_ipa_ctx->rx_filter;
+}
+
+/**
+ * tx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent to the USB core
+ *
+ * This function shall do Tx packet filtering on the Netdev level.
+ * An ICMP filter bypass is possible, allowing only ICMP packets to be
+ * sent (pings etc.)
+ */
+static bool tx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	bool is_icmp;
+
+	if (likely(!rndis_ipa_ctx->tx_filter))
+		return false;
+
+	is_icmp = (skb->protocol == htons(ETH_P_IP)	&&
+		ip_hdr(skb)->protocol == IPPROTO_ICMP);
+
+	if ((!rndis_ipa_ctx->icmp_filter) && is_icmp)
+		return false;
+
+	return true;
+}
+
+/**
+ * rndis_ipa_ep_registers_cfg() - configure the USB endpoints
+ * @usb_to_ipa_hdl: handle received from ipa_connect which represents
+ *  the USB to IPA end-point
+ * @ipa_to_usb_hdl: handle received from ipa_connect which represents
+ *  the IPA to USB end-point
+ * @max_xfer_size_bytes_to_dev: the maximum size, in bytes, that the device
+ *  expects to receive from the host. supplied on REMOTE_NDIS_INITIALIZE_CMPLT.
+ * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host
+ *  expects to receive from the device. supplied on REMOTE_NDIS_INITIALIZE_MSG.
+ * @mtu: the netdev MTU size, in bytes
+ * @deaggr_enable: should deaggregation be enabled?
+ * @is_vlan_mode: should driver work in vlan mode?
+ *
+ * USB to IPA pipe:
+ *  - de-aggregation
+ *  - Remove Ethernet header
+ *  - Remove RNDIS header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - aggregation
+ *  - Add Ethernet header
+ *  - Add RNDIS header
+ */
+static int rndis_ipa_ep_registers_cfg(
+	u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	u32 mtu,
+	bool deaggr_enable,
+	bool is_vlan_mode)
+{
+	int result;
+	struct ipa_ep_cfg *usb_to_ipa_ep_cfg;
+	int add = 0;
+
+	if (deaggr_enable) {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en;
+		RNDIS_IPA_DEBUG("deaggregation enabled\n");
+	} else {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis;
+		RNDIS_IPA_DEBUG("deaggregation disabled\n");
+		add = sizeof(struct rndis_pkt_hdr);
+	}
+
+	if (is_vlan_mode) {
+		usb_to_ipa_ep_cfg->hdr.hdr_len =
+			VLAN_ETH_HLEN + add;
+		ipa_to_usb_ep_cfg.hdr.hdr_len =
+			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = VLAN_ETH_HLEN;
+	} else {
+		usb_to_ipa_ep_cfg->hdr.hdr_len =
+			ETH_HLEN + add;
+		ipa_to_usb_ep_cfg.hdr.hdr_len =
+			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
+		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = ETH_HLEN;
+	}
+
+	usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg);
+	if (result) {
+		pr_err("failed to configure USB to IPA point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA<-USB end-point configured\n");
+
+	ipa_to_usb_ep_cfg.aggr.aggr_byte_limit =
+		(max_xfer_size_bytes_to_host - mtu) / 1024;
+
+	if (ipa_to_usb_ep_cfg.aggr.aggr_byte_limit == 0) {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit = 0;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = 1;
+	} else {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit =
+			DEFAULT_AGGR_TIME_LIMIT;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit =
+			DEFAULT_AGGR_PKT_LIMIT;
+	}
+
+	RNDIS_IPA_DEBUG(
+		"RNDIS aggregation param: en=%d byte_limit=%d time_limit=%d pkt_limit=%d\n"
+		, ipa_to_usb_ep_cfg.aggr.aggr_en,
+		ipa_to_usb_ep_cfg.aggr.aggr_byte_limit,
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit,
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+
+	/* enable hdr_metadata_reg_valid */
+	usb_to_ipa_ep_cfg->hdr.hdr_metadata_reg_valid = true;
+
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		pr_err("failed to configure IPA to USB end-point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA->USB end-point configured\n");
+
+	return 0;
+}
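+
+/*
+ * Worked example of the aggregation sizing above (illustrative):
+ * with max_xfer_size_bytes_to_host = 16384 and mtu = 1500,
+ * aggr_byte_limit = (16384 - 1500) / 1024 = 14, i.e. 14 KB, leaving
+ * headroom for one more MTU-sized packet to be appended to an open
+ * aggregation frame without overflowing the host buffer. A zero
+ * result degenerates to one packet per frame (aggregation disabled).
+ */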
+
+/**
+ * rndis_ipa_set_device_ethernet_addr() - set device Ethernet address
+ * @dev_ethaddr: device Ethernet address
+ *
+ * Returns 0 for success, negative otherwise
+ */
+static int rndis_ipa_set_device_ethernet_addr(
+	u8 *dev_ethaddr,
+	u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_next_state() - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represent the operation being made on the driver
+ *  by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid this state machine will return
+ * the value RNDIS_IPA_INVALID to inform the caller for a forbidden sequence.
+ */
+static enum rndis_ipa_state rndis_ipa_next_state(
+		enum rndis_ipa_state current_state,
+		enum rndis_ipa_operation operation)
+{
+	int next_state = RNDIS_IPA_INVALID;
+
+	switch (current_state) {
+	case RNDIS_IPA_UNLOADED:
+		if (operation == RNDIS_IPA_INITIALIZE)
+			next_state = RNDIS_IPA_INITIALIZED;
+		break;
+	case RNDIS_IPA_INITIALIZED:
+		if (operation == RNDIS_IPA_CONNECT)
+			next_state = RNDIS_IPA_CONNECTED;
+		else if (operation == RNDIS_IPA_OPEN)
+			next_state = RNDIS_IPA_UP;
+		else if (operation == RNDIS_IPA_CLEANUP)
+			next_state = RNDIS_IPA_UNLOADED;
+		break;
+	case RNDIS_IPA_CONNECTED:
+		if (operation == RNDIS_IPA_DISCONNECT)
+			next_state = RNDIS_IPA_INITIALIZED;
+		else if (operation == RNDIS_IPA_OPEN)
+			next_state = RNDIS_IPA_CONNECTED_AND_UP;
+		break;
+	case RNDIS_IPA_UP:
+		if (operation == RNDIS_IPA_STOP)
+			next_state = RNDIS_IPA_INITIALIZED;
+		else if (operation == RNDIS_IPA_CONNECT)
+			next_state = RNDIS_IPA_CONNECTED_AND_UP;
+		else if (operation == RNDIS_IPA_CLEANUP)
+			next_state = RNDIS_IPA_UNLOADED;
+		break;
+	case RNDIS_IPA_CONNECTED_AND_UP:
+		if (operation == RNDIS_IPA_STOP)
+			next_state = RNDIS_IPA_CONNECTED;
+		else if (operation == RNDIS_IPA_DISCONNECT)
+			next_state = RNDIS_IPA_UP;
+		break;
+	default:
+		RNDIS_IPA_ERROR("State is not supported\n");
+		break;
+	}
+
+	RNDIS_IPA_DEBUG
+		("state transition ( %s -> %s )- %s\n",
+		rndis_ipa_state_string(current_state),
+		rndis_ipa_state_string(next_state),
+		next_state == RNDIS_IPA_INVALID ?
+		"Forbidden" : "Allowed");
+
+	return next_state;
+}
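+
+/*
+ * State diagram derived from the switch above (illustrative, not part
+ * of the original source):
+ *
+ *	UNLOADED          --INITIALIZE--> INITIALIZED
+ *	INITIALIZED       --CONNECT-----> CONNECTED
+ *	INITIALIZED       --OPEN--------> UP
+ *	INITIALIZED or UP --CLEANUP-----> UNLOADED
+ *	CONNECTED         --OPEN--------> CONNECTED_AND_UP
+ *	CONNECTED         --DISCONNECT--> INITIALIZED
+ *	UP                --CONNECT-----> CONNECTED_AND_UP
+ *	UP                --STOP--------> INITIALIZED
+ *	CONNECTED_AND_UP  --STOP--------> CONNECTED
+ *	CONNECTED_AND_UP  --DISCONNECT--> UP
+ *
+ * Any other (state, operation) pair yields RNDIS_IPA_INVALID.
+ */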
+
+/**
+ * rndis_ipa_state_string - return the state string representation
+ * @state: enum which describe the state
+ */
+static const char *rndis_ipa_state_string(enum rndis_ipa_state state)
+{
+	switch (state) {
+	case RNDIS_IPA_UNLOADED:
+		return "RNDIS_IPA_UNLOADED";
+	case RNDIS_IPA_INITIALIZED:
+		return "RNDIS_IPA_INITIALIZED";
+	case RNDIS_IPA_CONNECTED:
+		return "RNDIS_IPA_CONNECTED";
+	case RNDIS_IPA_UP:
+		return "RNDIS_IPA_UP";
+	case RNDIS_IPA_CONNECTED_AND_UP:
+		return "RNDIS_IPA_CONNECTED_AND_UP";
+	default:
+		return "Not supported";
+	}
+}
+
+static void rndis_ipa_dump_skb(struct sk_buff *skb)
+{
+	int i;
+	u32 *cur = (u32 *)skb->data;
+	u8 *byte;
+
+	RNDIS_IPA_DEBUG
+		("packet dump start for skb->len=%d\n",
+		skb->len);
+
+	for (i = 0; i < (skb->len / 4); i++) {
+		byte = (u8 *)(cur + i);
+		pr_info
+			("%2d %08x   %02x %02x %02x %02x\n",
+			i, *(cur + i),
+			byte[0], byte[1], byte[2], byte[3]);
+	}
+	RNDIS_IPA_DEBUG
+		("packet dump ended for skb->len=%d\n", skb->len);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * Create the driver's debugfs directory and all of its entries
+ */
+static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	const mode_t flags_read_write = 0666;
+	const mode_t flags_read_only = 0444;
+	const mode_t flags_write_only = 0222;
+	struct dentry *file;
+	struct dentry *aggr_directory;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (!rndis_ipa_ctx)
+		return;
+
+	rndis_ipa_ctx->directory = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+	if (!rndis_ipa_ctx->directory) {
+		RNDIS_IPA_ERROR("could not create debugfs directory entry\n");
+		goto fail_directory;
+	}
+
+	file = debugfs_create_bool
+		("tx_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs tx_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("rx_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs rx_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("icmp_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->icmp_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs icmp_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("outstanding_high", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->outstanding_high);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding_high file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("outstanding_low", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->outstanding_low);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding_low file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_file
+		("outstanding", flags_read_only,
+		rndis_ipa_ctx->directory,
+		rndis_ipa_ctx, &rndis_ipa_debugfs_atomic_ops);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("state", flags_read_only,
+		rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create state file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("tx_dropped", flags_read_only,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_dropped);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create tx_dropped file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("rx_dropped", flags_read_only,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_dropped);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create rx_dropped file\n");
+		goto fail_file;
+	}
+
+	aggr_directory = debugfs_create_dir
+		(DEBUGFS_AGGR_DIR_NAME,
+		rndis_ipa_ctx->directory);
+	if (!aggr_directory) {
+		RNDIS_IPA_ERROR("could not create debugfs aggr entry\n");
+		goto fail_directory;
+	}
+
+	file = debugfs_create_file
+		("aggr_value_set", flags_write_only,
+		aggr_directory,
+		rndis_ipa_ctx, &rndis_ipa_aggr_ops);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_value_set file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("aggr_enable", flags_read_write,
+		aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr_en);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("aggr_type", flags_read_write,
+		aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_type file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_byte_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_byte_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_byte_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_time_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_time_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_time_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_pkt_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_pkt_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("tx_dump_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->tx_dump_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create tx_dump_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("rx_dump_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->rx_dump_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create rx_dump_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("deaggregation_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->deaggregation_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create deaggregation_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("error_msec_sleep_time", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->error_msec_sleep_time);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create error_msec_sleep_time file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("during_xmit_error", flags_read_only,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->during_xmit_error);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create during_xmit_error file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool("is_vlan_mode", flags_read_only,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->is_vlan_mode);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create is_vlan_mode file\n");
+		goto fail_file;
+	}
+
+	RNDIS_IPA_DEBUG("debugfs entries were created\n");
+	RNDIS_IPA_LOG_EXIT();
+
+	return;
+fail_file:
+	debugfs_remove_recursive(rndis_ipa_ctx->directory);
+fail_directory:
+	return;
+}
+
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	debugfs_remove_recursive(rndis_ipa_ctx->directory);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) {}
+
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) {}
+
+#endif /* CONFIG_DEBUG_FS*/
+
+static int rndis_ipa_debugfs_aggr_open
+		(struct inode *inode,
+		struct file *file)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+	file->private_data = rndis_ipa_ctx;
+
+	return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_aggr_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = NULL;
+	int result;
+
+	if (file == NULL)
+		return -EFAULT;
+	rndis_ipa_ctx = file->private_data;
+
+	result = ipa_cfg_ep(rndis_ipa_ctx->usb_to_ipa_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		pr_err("failed to re-configure USB to IPA point\n");
+		return result;
+	}
+	pr_info("IPA<-USB end-point re-configured\n");
+
+	return count;
+}
+
+static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	file->private_data = &rndis_ipa_ctx->outstanding_pkts;
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	nbytes = scnprintf
+		(atomic_str, sizeof(atomic_str), "%d\n",
+		atomic_read(atomic_var));
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
+static int __init rndis_ipa_init_module(void)
+{
+	ipa_rndis_logbuf = ipc_log_context_create(IPA_RNDIS_IPC_LOG_PAGES,
+		"ipa_rndis", 0);
+	if (ipa_rndis_logbuf == NULL)
+		RNDIS_IPA_ERROR("failed to create IPC log, continue...\n");
+
+	pr_info("RNDIS_IPA module is loaded.\n");
+	return 0;
+}
+
+static void __exit rndis_ipa_cleanup_module(void)
+{
+	if (ipa_rndis_logbuf)
+		ipc_log_context_destroy(ipa_rndis_logbuf);
+	ipa_rndis_logbuf = NULL;
+
+	pr_info("RNDIS_IPA module is unloaded.\n");
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RNDIS_IPA network interface");
+
+late_initcall(rndis_ipa_init_module);
+module_exit(rndis_ipa_cleanup_module);

+ 74 - 0
ipa/ipa_clients/rndis_ipa_trace.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rndis_ipa
+#define TRACE_INCLUDE_FILE rndis_ipa_trace
+
+#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RNDIS_IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	rndis_netif_ni,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+	rndis_tx_dp,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+	rndis_status_rcvd,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto =%lu\n", __entry->proto)
+);
+
+#endif /* _RNDIS_IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/platform/msm/ipa/ipa_clients
+#include <trace/define_trace.h>
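+
+/*
+ * Usage note (illustrative, assuming the standard Linux tracepoint
+ * machinery): one .c file defines CREATE_TRACE_POINTS before including
+ * this header, which generates trace_rndis_netif_ni(),
+ * trace_rndis_tx_dp() and trace_rndis_status_rcvd(), each taking the
+ * protocol value to record, e.g. trace_rndis_tx_dp(skb->protocol).
+ */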

+ 487 - 0
ipa/ipa_common_i.h

@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_COMMON_I_H_
+#define _IPA_COMMON_I_H_
+#include <linux/errno.h>
+#include <linux/ipc_logging.h>
+#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
+#include <linux/ipa_wdi3.h>
+#include <linux/ipa_wigig.h>
+#include <linux/ratelimit.h>
+
+#define WARNON_RATELIMIT_BURST 1
+#define IPA_RATELIMIT_BURST 1
+
+#define __FILENAME__ \
+	(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = EP; \
+		log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \
+			? "Invalid Client" : ipa_clients_strings[client]
+
+#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = SIMPLE; \
+		log_info.id_string = __func__
+
+#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = RESOURCE; \
+		log_info.id_string = resource_name
+
+#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = SPECIAL; \
+		log_info.id_string = id_str
+
+#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
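+
+/*
+ * Usage sketch (illustrative, not part of the original source): the
+ * INC/DEC pairs bracket code that needs the IPA clocks voted on, and
+ * tag the vote with __FILENAME__/__LINE__ for the active-clients log:
+ *
+ *	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ *	... access IPA HW ...
+ *	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ */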
+
+/*
+ * Print at most one warning message every 5 seconds when multiple
+ * warning messages come back to back.
+ */
+
+#define WARN_ON_RATELIMIT_IPA(condition)				\
+({								\
+	static DEFINE_RATELIMIT_STATE(_rs,			\
+				DEFAULT_RATELIMIT_INTERVAL,	\
+				WARNON_RATELIMIT_BURST);	\
+	int rtn = !!(condition);				\
+								\
+	if (unlikely(rtn && __ratelimit(&_rs)))			\
+		WARN_ON(rtn);					\
+})
+
+/*
+ * Print at most one error message every 5 seconds when multiple
+ * error messages come back to back.
+ */
+
+#define pr_err_ratelimited_ipa(fmt, args...)				\
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      IPA_RATELIMIT_BURST);		\
+									\
+	if (__ratelimit(&_rs))						\
+		pr_err(fmt, ## args);					\
+})
+
+#define ipa_assert_on(condition)\
+do {\
+	if (unlikely(condition))\
+		ipa_assert();\
+} while (0)
+
+#define IPA_CLIENT_IS_PROD(x) \
+	(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
+#define IPA_CLIENT_IS_CONS(x) \
+	(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
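+
+/*
+ * Note (illustrative assumption): these parity checks rely on the
+ * ipa_client_type enum interleaving producers and consumers, each
+ * *_PROD/*_CONS pair occupying an even/odd value pair; see the enum
+ * definition in <linux/ipa.h>.
+ */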
+
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
+
+enum ipa_active_client_log_type {
+	EP,
+	SIMPLE,
+	RESOURCE,
+	SPECIAL,
+	INVALID
+};
+
+struct ipa_active_client_logging_info {
+	const char *id_string;
+	char *file;
+	int line;
+	enum ipa_active_client_log_type type;
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+	void *base;
+	dma_addr_t phys_base;
+	u32 size;
+};
+
+/**
+ * enum ipa3_mhi_burst_mode - MHI channel burst mode state
+ *
+ * Values are according to MHI specification
+ * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
+ * disabled for SW channels
+ * @IPA_MHI_BURST_MODE_RESERVED:
+ * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
+ * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
+ *
+ */
+enum ipa3_mhi_burst_mode {
+	IPA_MHI_BURST_MODE_DEFAULT,
+	IPA_MHI_BURST_MODE_RESERVED,
+	IPA_MHI_BURST_MODE_DISABLE,
+	IPA_MHI_BURST_MODE_ENABLE,
+};
+
+/**
+ * enum ipa_hw_mhi_channel_states - MHI channel state machine
+ *
+ * Values are according to MHI specification
+ * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
+ *	the host or device.
+ * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
+ *	initialized and configured by host, including its channel context and
+ *	associated transfer ring. While in this state, the channel is not
+ *	active and the device does not process transfers.
+ * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
+ *	for channels.
+ * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
+ *	The device does not process transfers for the channel in this state.
+ *	This state is typically used to synchronize the transition to low power
+ *	modes.
+ * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
+ *	The device does not process transfers for the channel in this state.
+ * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
+ *	from the transfer ring associated with the channel.
+ * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be used in
+ *	operational scenarios.
+ */
+enum ipa_hw_mhi_channel_states {
+	IPA_HW_MHI_CHANNEL_STATE_DISABLE	= 0,
+	IPA_HW_MHI_CHANNEL_STATE_ENABLE		= 1,
+	IPA_HW_MHI_CHANNEL_STATE_RUN		= 2,
+	IPA_HW_MHI_CHANNEL_STATE_SUSPEND	= 3,
+	IPA_HW_MHI_CHANNEL_STATE_STOP		= 4,
+	IPA_HW_MHI_CHANNEL_STATE_ERROR		= 5,
+	IPA_HW_MHI_CHANNEL_STATE_INVALID	= 0xFF
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ * command. Parameters are sent as 32b immediate parameters.
+ * @isDlUlSyncEnabled: Flag to indicate if DL/UL synchronization is enabled
+ * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
+ *	for UL data)
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+union IpaHwMhiDlUlSyncCmdData_t {
+	struct IpaHwMhiDlUlSyncCmdParams_t {
+		u32 isDlUlSyncEnabled:8;
+		u32 UlAccmVal:8;
+		u32 ulMsiEventThreshold:8;
+		u32 dlMsiEventThreshold:8;
+	} params;
+	u32 raw32b;
+};
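+
+/*
+ * Packing sketch (illustrative, not part of the original source): the
+ * four 8-bit fields alias raw32b, so the 32b immediate parameter can
+ * be built with designated initializers, e.g.:
+ *
+ *	union IpaHwMhiDlUlSyncCmdData_t cmd = { .params = {
+ *		.isDlUlSyncEnabled = 1, .UlAccmVal = 8,
+ *		.ulMsiEventThreshold = 4, .dlMsiEventThreshold = 4 } };
+ *	send_cmd(cmd.raw32b);	/- hypothetical command sender -/
+ */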
+
+struct ipa_mhi_ch_ctx {
+	u8 chstate;/*0-7*/
+	u8 brstmode:2;/*8-9*/
+	u8 pollcfg:6;/*10-15*/
+	u16 rsvd;/*16-31*/
+	u32 chtype;
+	u32 erindex;
+	u64 rbase;
+	u64 rlen;
+	u64 rp;
+	u64 wp;
+} __packed;
+
+struct ipa_mhi_ev_ctx {
+	u32 intmodc:16;
+	u32 intmodt:16;
+	u32 ertype;
+	u32 msivec;
+	u64 rbase;
+	u64 rlen;
+	u64 rp;
+	u64 wp;
+} __packed;
+
+struct ipa_mhi_init_uc_engine {
+	struct ipa_mhi_msi_info *msi;
+	u32 mmio_addr;
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info;
+};
+
+struct ipa_mhi_init_gsi_engine {
+	u32 first_ch_idx;
+};
+
+struct ipa_mhi_init_engine {
+	struct ipa_mhi_init_uc_engine uC;
+	struct ipa_mhi_init_gsi_engine gsi;
+};
+
+struct start_gsi_channel {
+	enum ipa_hw_mhi_channel_states state;
+	struct ipa_mhi_msi_info *msi;
+	struct ipa_mhi_ev_ctx *ev_ctx_host;
+	u64 event_context_addr;
+	struct ipa_mhi_ch_ctx *ch_ctx_host;
+	u64 channel_context_addr;
+	void (*ch_err_cb)(struct gsi_chan_err_notify *notify);
+	void (*ev_err_cb)(struct gsi_evt_err_notify *notify);
+	void *channel;
+	bool assert_bit40;
+	struct gsi_mhi_channel_scratch *mhi;
+	unsigned long *cached_gsi_evt_ring_hdl;
+	uint8_t evchid;
+};
+
+struct start_uc_channel {
+	enum ipa_hw_mhi_channel_states state;
+	u8 index;
+	u8 id;
+};
+
+struct start_mhi_channel {
+	struct start_uc_channel uC;
+	struct start_gsi_channel gsi;
+};
+
+struct ipa_mhi_connect_params_internal {
+	struct ipa_sys_connect_params *sys;
+	u8 channel_id;
+	struct start_mhi_channel start;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa_hdr_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+	bool ipacm_installed;
+};
+
+extern const char *ipa_clients_strings[];
+
+#define IPA_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
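+
+/*
+ * Usage sketch (illustrative): the format string must consume the
+ * implicit __func__ (%s) and __LINE__ (%d) arguments first, e.g.:
+ *
+ *	IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), "%s:%d ep=%d\n", ep_idx);
+ */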
+
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+int ipa_inc_client_enable_clks_no_block(
+	struct ipa_active_client_logging_info *id);
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource);
+int ipa_resume_resource(enum ipa_rm_resource_name name);
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource);
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+	u32 bandwidth_mbps);
+void *ipa_get_ipc_logbuf(void);
+void *ipa_get_ipc_logbuf_low(void);
+void ipa_assert(void);
+
+/* MHI */
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params);
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl);
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client);
+int ipa_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+int ipa_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+int ipa_generate_tag_process(void);
+int ipa_disable_sps_pipe(enum ipa_client_type client);
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client);
+int ipa_mhi_start_channel_internal(enum ipa_client_type client);
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client);
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info);
+int ipa_mhi_destroy_channel(enum ipa_client_type client);
+int ipa_mhi_is_using_dma(bool *flag);
+const char *ipa_mhi_get_state_str(int state);
+
+/* MHI uC */
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init
+	(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa_uc_mhi_cleanup(void);
+int ipa_uc_mhi_reset_channel(int channelHandle);
+int ipa_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
+
+/* uC */
+int ipa_uc_state_check(void);
+
+/* general */
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
+void ipa_set_tag_process_before_gating(bool val);
+bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl,
+	struct ipa_ntn_conn_in_params *params);
+u8 *ipa_write_64(u64 w, u8 *dest);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_64(u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+			      void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
+
+int ipa_conn_wdi_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out,
+	ipa_wdi_meter_notifier_cb wdi_notify);
+
+int ipa_disconn_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+
+int ipa_enable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+
+int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+
+const char *ipa_get_version_string(enum ipa_hw_type ver);
+int ipa_start_gsi_channel(u32 clnt_hdl);
+
+int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr,
+		struct sg_table *in_sgt_ptr);
+int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
+
+#ifdef CONFIG_IPA_UT
+int ipa_ut_module_init(void);
+void ipa_ut_module_exit(void);
+#else
+static inline int ipa_ut_module_init(void)
+{
+	return -EPERM;
+}
+static inline void ipa_ut_module_exit(void)
+{
+}
+#endif
+
+int ipa_wigig_internal_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa);
+
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent);
+
+int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv);
+
+int ipa_wigig_uc_msi_init(
+	bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa);
+
+int ipa_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff);
+
+int ipa_enable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa_disable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa_wigig_send_msg(int msg_type,
+	const char *netdev_name, u8 *mac,
+	enum ipa_client_type client, bool to_wigig);
+
+int ipa_wigig_save_regs(void);
+
+void ipa_register_client_callback(int (*client_cb)(bool is_lock),
+			bool (*teth_port_state)(void), u32 ipa_ep_idx);
+
+void ipa_deregister_client_callback(u32 ipa_ep_idx);
+
+#endif /* _IPA_COMMON_I_H_ */

+ 1184 - 0
ipa/ipa_rm.c

@@ -0,0 +1,1184 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+
+static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
+	__stringify(IPA_RM_RESOURCE_Q6_PROD),
+	__stringify(IPA_RM_RESOURCE_Q6_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_PROD),
+	__stringify(IPA_RM_RESOURCE_USB_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
+	__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
+	__stringify(IPA_RM_RESOURCE_HSIC_PROD),
+	__stringify(IPA_RM_RESOURCE_HSIC_CONS),
+	__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
+	__stringify(IPA_RM_RESOURCE_APPS_CONS),
+	__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
+	__stringify(RESERVED_CONS_11),
+	__stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
+	__stringify(RESERVED_CONS_13),
+	__stringify(IPA_RM_RESOURCE_WLAN_PROD),
+	__stringify(IPA_RM_RESOURCE_WLAN_CONS),
+	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
+	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
+	__stringify(IPA_RM_RESOURCE_MHI_PROD),
+	__stringify(IPA_RM_RESOURCE_MHI_CONS),
+	__stringify(IPA_RM_RESOURCE_ETHERNET_PROD),
+	__stringify(IPA_RM_RESOURCE_ETHERNET_CONS),
+};
+
+struct ipa_rm_profile_vote_type {
+	enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX];
+	enum ipa_voltage_level curr_volt;
+	u32 bw_resources[IPA_RM_RESOURCE_MAX];
+	u32 curr_bw;
+};
+
+struct ipa_rm_context_type {
+	struct ipa_rm_dep_graph *dep_graph;
+	struct workqueue_struct *ipa_rm_wq;
+	spinlock_t ipa_rm_lock;
+	struct ipa_rm_profile_vote_type prof_vote;
+};
+static struct ipa_rm_context_type *ipa_rm_ctx;
+
+struct ipa_rm_notify_ipa_work_type {
+	struct work_struct		work;
+	enum ipa_voltage_level		volt;
+	u32				bandwidth_mbps;
+};
+
+/**
+ * ipa_rm_create_resource() - create resource
+ * @create_params: [in] parameters needed
+ *                  for resource initialization
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to initialize client's resources.
+ * This API should be called before any other IPA RM API on a given resource
+ * name.
+ *
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!create_params) {
+		IPA_RM_ERR("invalid args\n");
+		return -EINVAL;
+	}
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name));
+
+	if (create_params->floor_voltage < 0 ||
+		create_params->floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPA_RM_ERR("invalid voltage %d\n",
+			create_params->floor_voltage);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  create_params->name,
+					  &resource) == 0) {
+		IPA_RM_ERR("resource already exists\n");
+		result = -EEXIST;
+		goto bail;
+	}
+	result = ipa_rm_resource_create(create_params,
+			&resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_create() failed\n");
+		goto bail;
+	}
+	result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n");
+		ipa_rm_resource_delete(resource);
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_create_resource);
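+
+/*
+ * Usage sketch (illustrative; field names per the ipa_rm API headers,
+ * treat as an assumption): a client creates its resource before any
+ * other RM call on that name, e.g.:
+ *
+ *	struct ipa_rm_create_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.name = IPA_RM_RESOURCE_USB_PROD;
+ *	p.floor_voltage = IPA_VOLTAGE_SVS;
+ *	p.reg_params.notify_cb = my_rm_notify_cb;	/- hypothetical cb -/
+ *	if (ipa_rm_create_resource(&p))
+ *		pr_err("RM resource create failed\n");
+ */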
+
+/**
+ * ipa_rm_delete_resource() - delete resource
+ * @resource_name: name of resource to be deleted
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to delete client's resources.
+ *
+ */
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					resource_name,
+						&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_delete(resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_delete() failed\n");
+		goto bail;
+	}
+	result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph,
+								resource_name);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n");
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_delete_resource);
+
+static int _ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name,
+			bool userspace_dep)
+{
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_add_dependency(
+						ipa_rm_ctx->dep_graph,
+						resource_name,
+						depends_on_name,
+						userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_add_dependency() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
+
+/**
+ * ipa_rm_add_dependency_from_ioctl() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from IOCTL and the dependency will
+ * be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency(resource_name, depends_on_name, true);
+}
+
+static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name,
+		bool userspace_dep)
+{
+	int result;
+	struct ipa_rm_resource *consumer;
+	unsigned long time;
+	unsigned long flags;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_add_dependency(
+						ipa_rm_ctx->dep_graph,
+						resource_name,
+						depends_on_name,
+						userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (result == -EINPROGRESS) {
+		ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				depends_on_name,
+				&consumer);
+		IPA_RM_DBG("%s waits for GRANT of %s.\n",
+				ipa_rm_resource_str(resource_name),
+				ipa_rm_resource_str(depends_on_name));
+		time = wait_for_completion_timeout(
+				&((struct ipa_rm_resource_cons *)consumer)->
+				request_consumer_in_progress,
+				HZ * 5);
+		result = 0;
+		if (!time) {
+			IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.",
+					ipa_rm_resource_str(depends_on_name));
+			result = -ETIME;
+		} else {
+			IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
+				ipa_rm_resource_str(resource_name),
+				ipa_rm_resource_str(depends_on_name),
+				time);
+		}
+	}
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources
+ * in a synchronized fashion. In case a producer resource is in GRANTED state
+ * and the newly added consumer resource is in RELEASED state, the consumer
+ * entity will be requested and the function will block until the consumer
+ * is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * The dependency created here is marked as added by the kernel (not by
+ * userspace); see ipa_rm_add_dependency_sync_from_ioctl() for the
+ * userspace-marked variant.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+		false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency_sync);
+
+/**
+ * ipa_rm_add_dependency_sync_from_ioctl() - Create a dependency between 2
+ * resources in a synchronized fashion. In case a producer resource is in
+ * GRANTED state and the newly added consumer resource is in RELEASED state,
+ * the consumer entity will be requested and the function will block until
+ * the consumer is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync_from_ioctl(
+	enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+		true);
+}
+
+static int _ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name,
+			bool userspace_dep)
+{
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_delete_dependency(
+			  ipa_rm_ctx->dep_graph,
+			  resource_name,
+			  depends_on_name,
+			  userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_delete_dependency() - delete dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED may be generated
+ * if a client is registered with IPA RM
+ */
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_delete_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
+
+/**
+ * ipa_rm_delete_dependency_from_ioctl() - delete dependency between 2
+ * resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL handler; the
+ * dependency is treated as one added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED may be generated
+ * if a client is registered with IPA RM
+ */
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_delete_dependency(resource_name, depends_on_name, true);
+}
+
+/**
+ * ipa_rm_request_resource() - request resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
+ * on successful completion of this operation.
+ */
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_request(
+			(struct ipa_rm_resource_prod *)resource);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_request_resource);
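+
+/*
+ * Usage sketch (illustrative only): requesting a producer resource.
+ * A return of 0 means the resource is granted immediately; -EINPROGRESS
+ * means the grant completes asynchronously and is reported through the
+ * IPA_RM_RESOURCE_GRANTED callback registered via ipa_rm_register().
+ *
+ *	int ret;
+ *
+ *	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	if (ret == -EINPROGRESS)
+ *		wait_for_grant_event();	// hypothetical client helper
+ *	else if (ret)
+ *		pr_err("request failed: %d\n", ret);
+ */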
+
+void delayed_release_work_func(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_delayed_release_work_type *rwork = container_of(
+			to_delayed_work(work),
+			struct ipa_rm_delayed_release_work_type,
+			work);
+
+	if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		kfree(rwork);
+		return;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					rwork->resource_name,
+					&resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		goto bail;
+	}
+
+	ipa_rm_resource_consumer_release(
+		(struct ipa_rm_resource_cons *)resource, rwork->needed_bw,
+		rwork->dec_usage_count);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	kfree(rwork);
+
+}
+
+/**
+ * ipa_rm_request_resource_with_timer() - requests the specified consumer
+ * resource and releases it after 1 second
+ * @resource_name: name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_delayed_release_work_type *release_work;
+	int result;
+
+	if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_consumer_request(
+		(struct ipa_rm_resource_cons *)resource, 0, false, true);
+	if (result != 0 && result != -EINPROGRESS) {
+		IPA_RM_ERR("consumer request returned error %d\n", result);
+		result = -EPERM;
+		goto bail;
+	}
+
+	release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
+	if (!release_work) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	release_work->resource_name = resource->name;
+	release_work->needed_bw = 0;
+	release_work->dec_usage_count = false;
+	INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func);
+	schedule_delayed_work(&release_work->work,
+			msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC));
+	result = 0;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
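+
+/*
+ * Usage sketch (illustrative only): grab a consumer for a short burst of
+ * work and let IPA RM release it automatically after
+ * IPA_RM_RELEASE_DELAY_IN_MSEC (1000 ms), with no matching release call.
+ *
+ *	int ret;
+ *
+ *	ret = ipa_rm_request_resource_with_timer(IPA_RM_RESOURCE_Q6_CONS);
+ *	if (ret)
+ *		pr_err("timed request failed: %d\n", ret);
+ */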
+
+/**
+ * ipa_rm_release_resource() - release resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
+ * on successful completion of this operation.
+ */
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  resource_name,
+					  &resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_release(
+		    (struct ipa_rm_resource_prod *)resource);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_release_resource);
+
+/**
+ * ipa_rm_register() - register for event
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as those provided later to ipa_rm_deregister().
+ */
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				resource_name,
+				&resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_register(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params,
+			true);
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_register);
+
+/**
+ * ipa_rm_deregister() - cancel the registration
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as those provided to ipa_rm_register().
+ */
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_deregister(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params);
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_deregister);
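+
+/*
+ * Usage sketch (illustrative only; field and callback names assumed from
+ * the public ipa_rm header): registering for producer events with the
+ * same parameters later passed to ipa_rm_deregister(), as required above.
+ *
+ *	static void client_rm_notify(void *user_data,
+ *				     enum ipa_rm_event event,
+ *				     unsigned long data)
+ *	{
+ *		if (event == IPA_RM_RESOURCE_GRANTED)
+ *			; // resume the client data path
+ *	}
+ *
+ *	struct ipa_rm_register_params reg = {
+ *		.user_data = client_ctx,	// hypothetical context
+ *		.notify_cb = client_rm_notify,
+ *	};
+ *
+ *	ipa_rm_register(IPA_RM_RESOURCE_USB_PROD, &reg);
+ *	...
+ *	ipa_rm_deregister(IPA_RM_RESOURCE_USB_PROD, &reg);
+ */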
+
+/**
+ * ipa_rm_set_perf_profile() - set performance profile
+ * @resource_name: resource name
+ * @profile: [in] profile information.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Set resource performance profile.
+ * Updates IPA driver if performance level changed.
+ */
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_perf_profile *profile)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+	if (profile)
+		IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				resource_name,
+				&resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_set_perf_profile(resource, profile);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n",
+			result);
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_set_perf_profile);
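+
+/*
+ * Usage sketch (illustrative only): voting an 800 Mbps profile for a
+ * producer. max_supported_bandwidth_mbps is the field logged above;
+ * the bandwidth value is an arbitrary example.
+ *
+ *	struct ipa_rm_perf_profile profile = {
+ *		.max_supported_bandwidth_mbps = 800,
+ *	};
+ *
+ *	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_USB_PROD, &profile);
+ */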
+
+/**
+ * ipa_rm_notify_completion() - consumer driver notification of
+ *	request_resource / release_resource operation completion
+ * @event: notified event
+ * @resource_name: resource name
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name)
+{
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("event %d on %s\n", event,
+				ipa_rm_resource_str(resource_name));
+	if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
+			resource_name,
+			event,
+			false);
+	result = 0;
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
+
+static void ipa_rm_wq_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_work_type *ipa_rm_work =
+			container_of(work,
+					struct ipa_rm_wq_work_type,
+					work);
+	IPA_RM_DBG_LOW("%s cmd=%d event=%d notify_registered_only=%d\n",
+		ipa_rm_resource_str(ipa_rm_work->resource_name),
+		ipa_rm_work->wq_cmd,
+		ipa_rm_work->event,
+		ipa_rm_work->notify_registered_only);
+	switch (ipa_rm_work->wq_cmd) {
+	case IPA_RM_WQ_NOTIFY_PROD:
+		if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) {
+			IPA_RM_ERR("resource is not PROD\n");
+			goto free_work;
+		}
+		spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0){
+			IPA_RM_ERR("resource does not exists\n");
+			spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+			goto free_work;
+		}
+		ipa_rm_resource_producer_notify_clients(
+				(struct ipa_rm_resource_prod *)resource,
+				ipa_rm_work->event,
+				ipa_rm_work->notify_registered_only);
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		break;
+	case IPA_RM_WQ_NOTIFY_CONS:
+		break;
+	case IPA_RM_WQ_RESOURCE_CB:
+		spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0){
+			IPA_RM_ERR("resource does not exists\n");
+			spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+			goto free_work;
+		}
+		ipa_rm_resource_consumer_handle_cb(
+				(struct ipa_rm_resource_cons *)resource,
+				ipa_rm_work->event);
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		break;
+	default:
+		break;
+	}
+
+free_work:
+	kfree((void *) work);
+}
+
+static void ipa_rm_wq_resume_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+			container_of(work,
+			struct ipa_rm_wq_suspend_resume_work_type,
+			work);
+		IPA_RM_DBG_LOW("resume work handler: %s",
+		ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+	if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+		IPA_RM_ERR("resource is not CONS\n");
+		return;
+	}
+	IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str(
+			ipa_rm_work->resource_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					ipa_rm_work->resource_name,
+					&resource) != 0){
+		IPA_RM_ERR("resource does not exists\n");
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(
+				ipa_rm_work->resource_name));
+		goto bail;
+	}
+	ipa_rm_resource_consumer_request_work(
+			(struct ipa_rm_resource_cons *)resource,
+			ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true,
+			ipa_rm_work->inc_usage_count);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+bail:
+	kfree(ipa_rm_work);
+}
+
+
+static void ipa_rm_wq_suspend_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+			container_of(work,
+			struct ipa_rm_wq_suspend_resume_work_type,
+			work);
+		IPA_RM_DBG_LOW("suspend work handler: %s",
+		ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+	if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+		IPA_RM_ERR("resource is not CONS\n");
+		return;
+	}
+	ipa_suspend_resource_sync(ipa_rm_work->resource_name);
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					ipa_rm_work->resource_name,
+					&resource) != 0){
+		IPA_RM_ERR("resource does not exists\n");
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		return;
+	}
+	ipa_rm_resource_consumer_release_work(
+			(struct ipa_rm_resource_cons *)resource,
+			ipa_rm_work->prev_state,
+			true);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	kfree(ipa_rm_work);
+}
+
+/**
+ * ipa_rm_wq_send_cmd() - send a command for deferred work
+ * @wq_cmd: command that should be executed
+ * @resource_name: resource on which command should be executed
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ *
+ * Returns: nonzero if the work was queued, -ENOMEM on allocation failure
+ */
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event,
+		bool notify_registered_only)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+
+	if (work) {
+		INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler);
+		work->wq_cmd = wq_cmd;
+		work->resource_name = resource_name;
+		work->event = event;
+		work->notify_registered_only = notify_registered_only;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	}
+
+	return result;
+}
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+			GFP_ATOMIC);
+	if (work) {
+		INIT_WORK((struct work_struct *)work,
+				ipa_rm_wq_suspend_handler);
+		work->resource_name = resource_name;
+		work->prev_state = prev_state;
+		work->needed_bw = needed_bw;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	}
+
+	return result;
+}
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool inc_usage_count)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+			GFP_ATOMIC);
+	if (work) {
+		INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler);
+		work->resource_name = resource_name;
+		work->prev_state = prev_state;
+		work->needed_bw = needed_bw;
+		work->inc_usage_count = inc_usage_count;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	} else {
+		IPA_RM_ERR("no mem\n");
+	}
+
+	return result;
+}
+
+/**
+ * ipa_rm_initialize() - initialize IPA RM component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_initialize(void)
+{
+	int result;
+
+	ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
+	if (!ipa_rm_ctx) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
+	if (!ipa_rm_ctx->ipa_rm_wq) {
+		IPA_RM_ERR("create workqueue failed\n");
+		result = -ENOMEM;
+		goto create_wq_fail;
+	}
+	result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
+	if (result) {
+		IPA_RM_ERR("create dependency graph failed\n");
+		goto graph_alloc_fail;
+	}
+	spin_lock_init(&ipa_rm_ctx->ipa_rm_lock);
+	IPA_RM_DBG("SUCCESS\n");
+
+	return 0;
+graph_alloc_fail:
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+create_wq_fail:
+	kfree(ipa_rm_ctx);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_stat() - print RM stat
+ * @buf: [out] buffer the state is printed into
+ * @size: [in] the size of buf
+ *
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a full picture of the current state of the RM
+ */
+int ipa_rm_stat(char *buf, int size)
+{
+	unsigned long flags;
+	int i, cnt = 0, result = -EINVAL;
+	struct ipa_rm_resource *resource = NULL;
+	u32 sum_bw_prod = 0;
+	u32 sum_bw_cons = 0;
+
+	if (!buf || size < 0)
+		return result;
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; ++i) {
+		if (!IPA_RM_RESORCE_IS_PROD(i))
+			continue;
+		result = ipa_rm_dep_graph_get_resource(
+				ipa_rm_ctx->dep_graph,
+				i,
+				&resource);
+		if (!result) {
+			result = ipa_rm_resource_producer_print_stat(
+							resource, buf + cnt,
+							size - cnt);
+			if (result < 0)
+				goto bail;
+			cnt += result;
+		}
+	}
+
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (IPA_RM_RESORCE_IS_PROD(i))
+			sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i];
+		else
+			sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i];
+	}
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"All prod bandwidth: %d, All cons bandwidth: %d\n",
+		sum_bw_prod, sum_bw_cons);
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"Voting: voltage %d, bandwidth %d\n",
+		ipa_rm_ctx->prof_vote.curr_volt,
+		ipa_rm_ctx->prof_vote.curr_bw);
+	cnt += result;
+
+	result = cnt;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_str() - returns string that represent the resource
+ * @resource_name: [in] resource name
+ */
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name)
+{
+	if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX)
+		return "INVALID RESOURCE";
+
+	return resource_name_to_str[resource_name];
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work)
+{
+	struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work,
+				struct ipa_rm_notify_ipa_work_type,
+				work);
+	int res;
+
+	IPA_RM_DBG_LOW("calling to IPA driver. voltage %d bandwidth %d\n",
+		notify_work->volt, notify_work->bandwidth_mbps);
+
+	res = ipa_set_required_perf_profile(notify_work->volt,
+		notify_work->bandwidth_mbps);
+	if (res) {
+		IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res);
+		goto bail;
+	}
+
+	IPA_RM_DBG_LOW("IPA driver notified\n");
+bail:
+	kfree(notify_work);
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt,
+					      u32 bandwidth)
+{
+	struct ipa_rm_notify_ipa_work_type *work;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work);
+	work->volt = volt;
+	work->bandwidth_mbps = bandwidth;
+	queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work);
+}
+
+/**
+ * ipa_rm_perf_profile_change() - change performance profile vote for resource
+ * @resource_name: [in] resource name
+ *
+ * change bandwidth and voltage vote based on resource state.
+ */
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
+{
+	enum ipa_voltage_level old_volt;
+	u32 *bw_ptr;
+	u32 old_bw;
+	struct ipa_rm_resource *resource;
+	int i;
+	u32 sum_bw_prod = 0;
+	u32 sum_bw_cons = 0;
+
+	IPA_RM_DBG_LOW("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  resource_name,
+					  &resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		WARN_ON(1);
+		return;
+	}
+
+	old_volt = ipa_rm_ctx->prof_vote.curr_volt;
+	old_bw = ipa_rm_ctx->prof_vote.curr_bw;
+
+	bw_ptr = &ipa_rm_ctx->prof_vote.bw_resources[resource_name];
+
+	switch (resource->state) {
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		IPA_RM_DBG_LOW("max_bw = %d, needed_bw = %d\n",
+			resource->max_bw, resource->needed_bw);
+		*bw_ptr = min(resource->max_bw, resource->needed_bw);
+		ipa_rm_ctx->prof_vote.volt[resource_name] =
+						resource->floor_voltage;
+		break;
+
+	case IPA_RM_RELEASE_IN_PROGRESS:
+	case IPA_RM_RELEASED:
+		*bw_ptr = 0;
+		ipa_rm_ctx->prof_vote.volt[resource_name] = 0;
+		break;
+
+	default:
+		IPA_RM_ERR("unknown state %d\n", resource->state);
+		WARN_ON(1);
+		return;
+	}
+	IPA_RM_DBG_LOW("resource bandwidth: %d voltage: %d\n", *bw_ptr,
+					resource->floor_voltage);
+
+	ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED;
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (ipa_rm_ctx->prof_vote.volt[i] >
+				ipa_rm_ctx->prof_vote.curr_volt) {
+			ipa_rm_ctx->prof_vote.curr_volt =
+				ipa_rm_ctx->prof_vote.volt[i];
+		}
+	}
+
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (IPA_RM_RESORCE_IS_PROD(i))
+			sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i];
+		else
+			sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i];
+	}
+
+	IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n",
+		sum_bw_prod, sum_bw_cons);
+	ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons);
+
+	if (ipa_rm_ctx->prof_vote.curr_volt == old_volt &&
+		ipa_rm_ctx->prof_vote.curr_bw == old_bw) {
+		IPA_RM_DBG_LOW("same voting\n");
+		return;
+	}
+
+	IPA_RM_DBG_LOW("new voting: voltage %d bandwidth %d\n",
+		ipa_rm_ctx->prof_vote.curr_volt,
+		ipa_rm_ctx->prof_vote.curr_bw);
+
+	ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt,
+			ipa_rm_ctx->prof_vote.curr_bw);
+}
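+
+/*
+ * Worked example of the aggregation above (numbers are illustrative):
+ * if two producers vote 400 and 100 Mbps while the consumers vote
+ * 300 Mbps in total, curr_bw = min(400 + 100, 300) = 300 Mbps, and
+ * curr_volt is the maximum floor voltage over all per-resource votes.
+ */
+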
+/**
+ * ipa_rm_exit() - free all IPA RM resources
+ */
+void ipa_rm_exit(void)
+{
+	IPA_RM_DBG("ENTER\n");
+	ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph);
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+	kfree(ipa_rm_ctx);
+	ipa_rm_ctx = NULL;
+	IPA_RM_DBG("EXIT\n");
+}

+ 240 - 0
ipa/ipa_rm_dependency_graph.c

@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+
+static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+		resource_index = ipa_rm_cons_index(resource_name);
+
+	return resource_index;
+}
+
+/**
+ * ipa_rm_dep_graph_create() - creates graph
+ * @dep_graph: [out] created dependency graph
+ *
+ * Returns: 0 on success, -ENOMEM on allocation failure
+ */
+int  ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
+{
+	int result = 0;
+
+	*dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
+	if (!*dep_graph)
+		result = -ENOMEM;
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete() - destroys the graph
+ * @graph: [in] dependency graph
+ *
+ * Frees all resources.
+ */
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
+{
+	int resource_index;
+
+	if (!graph) {
+		IPA_RM_ERR("invalid params\n");
+		return;
+	}
+	for (resource_index = 0;
+			resource_index < IPA_RM_RESOURCE_MAX;
+			resource_index++)
+		kfree(graph->resource_table[resource_index]);
+	memset(graph->resource_table, 0, sizeof(graph->resource_table));
+}
+
+/**
+ * ipa_rm_dep_graph_get_resource() - provides a resource by name
+ * @graph: [in] dependency graph
+ * @name: [in] name of the resource
+ * @resource: [out] resource in case of success
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				struct ipa_rm_resource **resource)
+{
+	int result;
+	int resource_index;
+
+	if (!graph) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource_name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	*resource = graph->resource_table[resource_index];
+	if (!*resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	result = 0;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add() - adds resource to graph
+ * @graph: [in] dependency graph
+ * @resource: [in] resource to add
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource)
+{
+	int result = 0;
+	int resource_index;
+
+	if (!graph || !resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource->name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	graph->resource_table[resource_index] = resource;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_remove() - removes resource from graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource to remove
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+		enum ipa_rm_resource_name resource_name)
+{
+	if (!graph)
+		return -EINVAL;
+	graph->resource_table[resource_name] = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa_rm_dep_graph_add_dependency() - adds dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource (producer)
+ * @depends_on_name: [in] name of the resource it depends on (consumer)
+ * @userspace_dep: [in] was the operation requested by userspace ?
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				    enum ipa_rm_resource_name resource_name,
+				    enum ipa_rm_resource_name depends_on_name,
+				    bool userspace_dep)
+{
+	struct ipa_rm_resource *dependent = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependent)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(resource_name));
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					depends_on_name,
+					  &dependency)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(depends_on_name));
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_add_dependency(dependent, dependency,
+		userspace_dep);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete_dependency() - deletes dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource (producer)
+ * @depends_on_name: [in] name of the resource it depends on (consumer)
+ * @userspace_dep: [in] was the operation requested by userspace ?
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep)
+{
+	struct ipa_rm_resource *dependent = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependent)) {
+		IPA_RM_DBG("%s does not exist\n",
+					ipa_rm_resource_str(resource_name));
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  depends_on_name,
+					  &dependency)) {
+		IPA_RM_DBG("%s does not exist\n",
+					ipa_rm_resource_str(depends_on_name));
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = ipa_rm_resource_delete_dependency(dependent, dependency,
+		userspace_dep);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}

+ 42 - 0
ipa/ipa_rm_dependency_graph.h

@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
+#define _IPA_RM_DEPENDENCY_GRAPH_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_dep_graph {
+	struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
+};
+
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name name,
+				struct ipa_rm_resource **resource);
+
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
+
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
+
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource);
+
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */

+ 150 - 0
ipa/ipa_rm_i.h

@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_RM_I_H_
+#define _IPA_RM_I_H_
+
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+#include "ipa_common_i.h"
+
+#define IPA_RM_DRV_NAME "ipa_rm"
+
+#define IPA_RM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_RM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_RM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_RM_RESORCE_IS_PROD(x) \
+	((x) < IPA_RM_RESOURCE_MAX && ((x) & 0x1) == 0)
+#define IPA_RM_RESORCE_IS_CONS(x) \
+	((x) < IPA_RM_RESOURCE_MAX && ((x) & 0x1) == 1)
+#define IPA_RM_INDEX_INVALID	(-1)
+#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
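+
+/*
+ * Note: the IS_PROD/IS_CONS tests above rely on enum
+ * ipa_rm_resource_name assigning even values to producers and odd
+ * values to consumers, e.g. (illustrative; the actual values live in the
+ * public header):
+ *
+ *	IPA_RM_RESORCE_IS_PROD(IPA_RM_RESOURCE_USB_PROD)	-> true
+ *	IPA_RM_RESORCE_IS_CONS(IPA_RM_RESOURCE_USB_CONS)	-> true
+ */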
+
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
+
+/**
+ * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release
+ *				work type
+ * @work: work struct
+ * @resource_name: name of the resource on which this work should be done
+ * @needed_bw: bandwidth required for resource in Mbps
+ * @dec_usage_count: decrease usage count on release ?
+ */
+struct ipa_rm_delayed_release_work_type {
+	struct delayed_work		work;
+	enum ipa_rm_resource_name	resource_name;
+	u32				needed_bw;
+	bool				dec_usage_count;
+};
+
+/**
+ * enum ipa_rm_wq_cmd - workqueue commands
+ */
+enum ipa_rm_wq_cmd {
+	IPA_RM_WQ_NOTIFY_PROD,
+	IPA_RM_WQ_NOTIFY_CONS,
+	IPA_RM_WQ_RESOURCE_CB
+};
+
+/**
+ * struct ipa_rm_wq_work_type - IPA RM workqueue specific
+ *				work type
+ * @work: work struct
+ * @wq_cmd: command that should be processed in workqueue context
+ * @resource_name: name of the resource on which this work
+ *			should be done
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ */
+struct ipa_rm_wq_work_type {
+	struct work_struct		work;
+	enum ipa_rm_wq_cmd		wq_cmd;
+	enum ipa_rm_resource_name	resource_name;
+	enum ipa_rm_event		event;
+	bool				notify_registered_only;
+};
+
+/**
+ * struct ipa_rm_wq_suspend_resume_work_type - IPA RM workqueue resume or
+ *				suspend work type
+ * @work: work struct
+ * @resource_name: name of the resource on which this work
+ *			should be done
+ * @prev_state: resource state before the suspend/resume transition
+ * @needed_bw: bandwidth required for the resource in Mbps
+ * @inc_usage_count: increase usage count when the resume completes ?
+ */
+struct ipa_rm_wq_suspend_resume_work_type {
+	struct work_struct		work;
+	enum ipa_rm_resource_name	resource_name;
+	enum ipa_rm_resource_state	prev_state;
+	u32				needed_bw;
+	bool				inc_usage_count;
+};
+
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event,
+		bool notify_registered_only);
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool inc_usage_count);
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw);
+
+int ipa_rm_initialize(void);
+
+int ipa_rm_stat(char *buf, int size);
+
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
+
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name);
+
+void delayed_release_work_func(struct work_struct *work);
+
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name);
+
+void ipa_rm_exit(void);
+
+#endif /* _IPA_RM_I_H_ */

+ 279 - 0
ipa/ipa_rm_inactivity_timer.c

@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/unistd.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_i.h"
+
+#define MAX_WS_NAME 20
+
+/**
+ * struct ipa_rm_it_private - IPA RM Inactivity Timer private
+ *	data
+ * @initied: indicates if instance was initialized
+ * @lock: spinlock for mutual exclusion
+ * @resource_name: resource name
+ * @work: delayed work object for running the delayed release
+ *	function
+ * @resource_requested: boolean flag indicates if resource was requested
+ * @reschedule_work: boolean flag indicates to not release and to
+ *	reschedule the release work.
+ * @work_in_progress: boolean flag indicates if release work was scheduled.
+ * @jiffies: number of jiffies for timeout
+ * @w_lock: wakeup source held while a delayed release is pending
+ * @w_lock_name: name of the wakeup source
+ */
+struct ipa_rm_it_private {
+	bool initied;
+	enum ipa_rm_resource_name resource_name;
+	spinlock_t lock;
+	struct delayed_work work;
+	bool resource_requested;
+	bool reschedule_work;
+	bool work_in_progress;
+	unsigned long jiffies;
+	struct wakeup_source *w_lock;
+	char w_lock_name[MAX_WS_NAME];
+};
+
+static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
+
+/**
+ * ipa_rm_inactivity_timer_func() - called in the context of the shared
+ * workqueue when the timer expires. If the reschedule_work flag is set,
+ * the work is rescheduled and the flag is cleared. Otherwise, unless the
+ * resource was requested again in the meantime,
+ * ipa_rm_release_resource() is called. The flag is set by
+ * ipa_rm_inactivity_timer_release_resource() when release work is already
+ * scheduled.
+ *
+ * @work: work object provided by the work queue
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_inactivity_timer_func(struct work_struct *work)
+{
+	struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
+						    struct ipa_rm_it_private,
+						    work);
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("timer expired for resource %d\n", me->resource_name);
+
+	spin_lock_irqsave(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+	if (ipa_rm_it_handles[me->resource_name].reschedule_work) {
+		IPA_RM_DBG_LOW("setting delayed work\n");
+		ipa_rm_it_handles[me->resource_name].reschedule_work = false;
+		queue_delayed_work(system_unbound_wq,
+			&ipa_rm_it_handles[me->resource_name].work,
+			ipa_rm_it_handles[me->resource_name].jiffies);
+	} else if (ipa_rm_it_handles[me->resource_name].resource_requested) {
+		IPA_RM_DBG_LOW("not calling release\n");
+		ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+	} else {
+		IPA_RM_DBG_LOW("calling release_resource on resource %d\n",
+			me->resource_name);
+		__pm_relax(ipa_rm_it_handles[me->resource_name].w_lock);
+		ipa_rm_release_resource(me->resource_name);
+		ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+	}
+	spin_unlock_irqrestore(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+}
+
+/**
+ * ipa_rm_inactivity_timer_init() - Init function for IPA RM
+ * inactivity timer. This function shall be called prior to calling
+ * any other API of IPA RM inactivity timer.
+ *
+ * @resource_name: Resource name. @see ipa_rm.h
+ * @msecs: time in milliseconds that IPA RM inactivity timer
+ * shall wait prior to calling ipa_rm_release_resource().
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+				 unsigned long msecs)
+{
+	char *name;
+
+	IPA_RM_DBG_LOW("resource %d\n", resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("resource %d already inited\n", resource_name);
+		return -EINVAL;
+	}
+
+	spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
+	ipa_rm_it_handles[resource_name].resource_name = resource_name;
+	ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
+	ipa_rm_it_handles[resource_name].resource_requested = false;
+	ipa_rm_it_handles[resource_name].reschedule_work = false;
+	ipa_rm_it_handles[resource_name].work_in_progress = false;
+	name = ipa_rm_it_handles[resource_name].w_lock_name;
+	snprintf(name, MAX_WS_NAME, "IPA_RM%d", resource_name);
+	ipa_rm_it_handles[resource_name].w_lock =
+		wakeup_source_register(NULL, name);
+	if (!ipa_rm_it_handles[resource_name].w_lock) {
+		IPA_RM_ERR("IPA wakeup source register failed %s\n",
+			name);
+		return -ENOMEM;
+	}
+
+	INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
+			  ipa_rm_inactivity_timer_func);
+	ipa_rm_it_handles[resource_name].initied = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
+
+/**
+ * ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
+ * RM inactivity timer.
+ * @resource_name: Resource name. @see ipa_rm.h
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+{
+	IPA_RM_DBG_LOW("resource %d\n", resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("resource %d already inited\n",
+			resource_name);
+		return -EINVAL;
+	}
+
+	cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work);
+	wakeup_source_unregister(ipa_rm_it_handles[resource_name].w_lock);
+
+	memset(&ipa_rm_it_handles[resource_name], 0,
+	       sizeof(struct ipa_rm_it_private));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
+
+/**
+ * ipa_rm_inactivity_timer_request_resource() - Same as
+ * ipa_rm_request_resource(), except that calling this function also
+ * marks the resource as requested, so a release scheduled earlier by
+ * ipa_rm_inactivity_timer_release_resource() will not take effect.
+ *
+ * @resource_name: Resource name. @see ipa_rm.h
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	int ret;
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("resource %d\n", resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("Not initialized\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	ipa_rm_it_handles[resource_name].resource_requested = true;
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+	ret = ipa_rm_request_resource(resource_name);
+	IPA_RM_DBG_LOW("resource %d: returning %d\n", resource_name, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
+
+/**
+ * ipa_rm_inactivity_timer_release_resource() - Arms the
+ * inactivity timer with the timeout set by
+ * ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+ * RM inactivity timer will call ipa_rm_release_resource().
+ * If a call to ipa_rm_inactivity_timer_request_resource() is
+ * made BEFORE the timeout expires, the release will be
+ * cancelled.
+ *
+ * @resource_name: Resource name. @see ipa_rm.h
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: invalid parameters
+ */
+int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("resource %d\n", resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("Not initialized\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	ipa_rm_it_handles[resource_name].resource_requested = false;
+	if (ipa_rm_it_handles[resource_name].work_in_progress) {
+		IPA_RM_DBG_LOW("Timer already set, no sched again %d\n",
+		    resource_name);
+		ipa_rm_it_handles[resource_name].reschedule_work = true;
+		spin_unlock_irqrestore(
+			&ipa_rm_it_handles[resource_name].lock, flags);
+		return 0;
+	}
+	ipa_rm_it_handles[resource_name].work_in_progress = true;
+	ipa_rm_it_handles[resource_name].reschedule_work = false;
+	__pm_stay_awake(ipa_rm_it_handles[resource_name].w_lock);
+	IPA_RM_DBG_LOW("setting delayed work\n");
+	queue_delayed_work(system_unbound_wq,
+			      &ipa_rm_it_handles[resource_name].work,
+			      ipa_rm_it_handles[resource_name].jiffies);
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
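+
+/*
+ * Usage sketch (illustrative only): a typical inactivity-timer flow.
+ * The client requests the resource on activity and "releases" it after
+ * each burst; the actual ipa_rm_release_resource() happens only once
+ * the resource stays idle for the full timeout.
+ *
+ *	ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);
+ *
+ *	// on every activity burst:
+ *	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	// ... do work ...
+ *	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ *
+ *	// on teardown:
+ *	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
+ */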
+

+ 270 - 0
ipa/ipa_rm_peers_list.c

@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_i.h"
+
+/**
+ * ipa_rm_peers_list_get_resource_index() - resource name to index
+ *	of this resource in corresponding peers list
+ * @resource_name: [in] resource name
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained in enum
+ * ipa_rm_resource_name.
+ *
+ */
+static int ipa_rm_peers_list_get_resource_index(
+		enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+		resource_index = ipa_rm_cons_index(resource_name);
+
+	return resource_index;
+}
+
+static bool ipa_rm_peers_list_check_index(int index,
+		struct ipa_rm_peers_list *peers_list)
+{
+	/* valid indices are 0 .. max_peers - 1 */
+	return index >= 0 && index < peers_list->max_peers;
+}
+
+/**
+ * ipa_rm_peers_list_create() - creates the peers list
+ *
+ * @max_peers: maximum number of peers in new list
+ * @peers_list: [out] newly created peers list
+ *
+ * Returns: 0 in case of SUCCESS, negative otherwise
+ */
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list)
+{
+	int result;
+
+	*peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC);
+	if (!*peers_list) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	(*peers_list)->max_peers = max_peers;
+	(*peers_list)->peers = kcalloc((*peers_list)->max_peers,
+			sizeof(*((*peers_list)->peers)), GFP_ATOMIC);
+	if (!((*peers_list)->peers)) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto list_alloc_fail;
+	}
+
+	return 0;
+
+list_alloc_fail:
+	kfree(*peers_list);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_delete() - deletes the peers list
+ *
+ * @peers_list: peers list
+ *
+ */
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
+{
+	if (peers_list) {
+		kfree(peers_list->peers);
+		kfree(peers_list);
+	}
+}
+
+/**
+ * ipa_rm_peers_list_remove_peer() - removes peer from the list
+ *
+ * @peers_list: peers list
+ * @resource_name: name of the resource to remove
+ *
+ */
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name)
+{
+	if (!peers_list)
+		return;
+
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource_name)].resource = NULL;
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource_name)].userspace_dep = false;
+	peers_list->peers_count--;
+}
+
+/**
+ * ipa_rm_peers_list_add_peer() - adds peer to the list
+ *
+ * @peers_list: peers list
+ * @resource: resource to add
+ *
+ */
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource,
+		bool userspace_dep)
+{
+	if (!peers_list || !resource)
+		return;
+
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource->name)].resource = resource;
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+		resource->name)].userspace_dep = userspace_dep;
+	peers_list->peers_count++;
+}
+
+/**
+ * ipa_rm_peers_list_is_empty() - checks
+ *	if resource peers list is empty
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list is empty, false otherwise
+ */
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
+{
+	bool result = true;
+
+	if (!peers_list)
+		goto bail;
+
+	if (peers_list->peers_count > 0)
+		result = false;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_has_last_peer() - checks
+ *	if resource peers list has exactly one peer
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list has exactly one peer, false otherwise
+ */
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list)
+{
+	bool result = false;
+
+	if (!peers_list)
+		goto bail;
+
+	if (peers_list->peers_count == 1)
+		result = true;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_check_dependency() - check dependency
+ *	between 2 peer lists
+ * @resource_peers: first peers list
+ * @resource_name: first peers list resource name
+ * @depends_on_peers: second peers list
+ * @depends_on_name: second peers list resource name
+ * @userspace_dep: [out] dependency was created by userspace
+ *
+ * Returns: true if there is dependency, false otherwise
+ *
+ */
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name,
+		bool *userspace_dep)
+{
+	bool result = false;
+	int resource_index;
+	struct ipa_rm_resource_peer *peer_ptr;
+
+	if (!resource_peers || !depends_on_peers || !userspace_dep)
+		return result;
+
+	resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name);
+	peer_ptr = &resource_peers->peers[resource_index];
+	if (peer_ptr->resource != NULL) {
+		result = true;
+		*userspace_dep = peer_ptr->userspace_dep;
+	}
+
+	resource_index = ipa_rm_peers_list_get_resource_index(resource_name);
+	peer_ptr = &depends_on_peers->peers[resource_index];
+	if (peer_ptr->resource != NULL) {
+		result = true;
+		*userspace_dep = peer_ptr->userspace_dep;
+	}
+
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_resource() - get resource by
+ *	resource index
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: the resource if found, NULL otherwise
+ */
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *resource_peers)
+{
+	struct ipa_rm_resource *result = NULL;
+
+	if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+		goto bail;
+
+	result = resource_peers->peers[resource_index].resource;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency
+ * was added by userspace
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: true if dependency was added by userspace, false by kernel
+ */
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+		struct ipa_rm_peers_list *resource_peers)
+{
+	bool result = false;
+
+	if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+		goto bail;
+
+	result = resource_peers->peers[resource_index].userspace_dep;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_size() - get peers list size
+ *
+ * @peers_list: peers list
+ *
+ * Returns: the size of the peers list
+ */
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
+{
+	return peers_list->max_peers;
+}

+ 55 - 0
ipa/ipa_rm_peers_list.h

@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_RM_PEERS_LIST_H_
+#define _IPA_RM_PEERS_LIST_H_
+
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_resource_peer {
+	struct ipa_rm_resource *resource;
+	bool userspace_dep;
+};
+
+/**
+ * struct ipa_rm_peers_list - IPA RM resource peers list
+ * @peers: the list of references to resources dependent on this resource
+ *          in case of producer or list of dependencies in case of consumer
+ * @max_peers: maximum number of peers for this resource
+ * @peers_count: actual number of peers for this resource
+ */
+struct ipa_rm_peers_list {
+	struct ipa_rm_resource_peer	*peers;
+	int				max_peers;
+	int				peers_count;
+};
+
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list);
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name);
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource,
+		bool userspace_dep);
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name,
+		bool *userspace_dep);
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+		struct ipa_rm_peers_list *resource_peers);
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list);
+
+
+#endif /* _IPA_RM_PEERS_LIST_H_ */

+ 1204 - 0
ipa/ipa_rm_resource.c

@@ -0,0 +1,1204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_resource.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+
+/**
+ * ipa_rm_prod_index() - producer name to producer index mapping
+ * @resource_name: [in] resource name (should be of producer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not of producers.
+ *
+ */
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_Q6_PROD:
+	case IPA_RM_RESOURCE_USB_PROD:
+	case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
+	case IPA_RM_RESOURCE_HSIC_PROD:
+	case IPA_RM_RESOURCE_STD_ECM_PROD:
+	case IPA_RM_RESOURCE_RNDIS_PROD:
+	case IPA_RM_RESOURCE_WWAN_0_PROD:
+	case IPA_RM_RESOURCE_WLAN_PROD:
+	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+	case IPA_RM_RESOURCE_MHI_PROD:
+	case IPA_RM_RESOURCE_ETHERNET_PROD:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+
+	return result;
+}
+
+/**
+ * ipa_rm_cons_index() - consumer name to consumer index mapping
+ * @resource_name: [in] resource name (should be of consumer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not of consumers.
+ *
+ */
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_Q6_CONS:
+	case IPA_RM_RESOURCE_USB_CONS:
+	case IPA_RM_RESOURCE_HSIC_CONS:
+	case IPA_RM_RESOURCE_WLAN_CONS:
+	case IPA_RM_RESOURCE_APPS_CONS:
+	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+	case IPA_RM_RESOURCE_MHI_CONS:
+	case IPA_RM_RESOURCE_USB_DPL_CONS:
+	case IPA_RM_RESOURCE_ETHERNET_CONS:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+
+	return result;
+}
+
+int ipa_rm_resource_consumer_release_work(
+		struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		bool notify_completion)
+{
+	int driver_result;
+
+	IPA_RM_DBG_LOW("calling driver CB\n");
+	driver_result = consumer->release_resource();
+	IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+	/*
+	 * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED
+	 * for CONS which remains in RELEASE_IN_PROGRESS.
+	 */
+	if (driver_result == -EINPROGRESS)
+		driver_result = 0;
+	if (driver_result != 0 && driver_result != -EINPROGRESS) {
+		IPA_RM_ERR("driver CB returned error %d\n", driver_result);
+		consumer->resource.state = prev_state;
+		goto bail;
+	}
+	if (driver_result == 0) {
+		if (notify_completion)
+			ipa_rm_resource_consumer_handle_cb(consumer,
+					IPA_RM_RESOURCE_RELEASED);
+		else
+			consumer->resource.state = IPA_RM_RELEASED;
+	}
+	complete_all(&consumer->request_consumer_in_progress);
+
+	ipa_rm_perf_profile_change(consumer->resource.name);
+bail:
+	return driver_result;
+}
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		u32 prod_needed_bw,
+		bool notify_completion,
+		bool dec_client_on_err)
+{
+	int driver_result;
+
+	IPA_RM_DBG_LOW("calling driver CB\n");
+	driver_result = consumer->request_resource();
+	IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+	if (driver_result == 0) {
+		if (notify_completion) {
+			ipa_rm_resource_consumer_handle_cb(consumer,
+					IPA_RM_RESOURCE_GRANTED);
+		} else {
+			consumer->resource.state = IPA_RM_GRANTED;
+			ipa_rm_perf_profile_change(consumer->resource.name);
+			ipa_resume_resource(consumer->resource.name);
+		}
+	} else if (driver_result != -EINPROGRESS) {
+		consumer->resource.state = prev_state;
+		consumer->resource.needed_bw -= prod_needed_bw;
+		if (dec_client_on_err)
+			consumer->usage_count--;
+	}
+
+	return driver_result;
+}
+
+int ipa_rm_resource_consumer_request(
+		struct ipa_rm_resource_cons *consumer,
+		u32 prod_needed_bw,
+		bool inc_usage_count,
+		bool wake_client)
+{
+	int result = 0;
+	enum ipa_rm_resource_state prev_state;
+	struct ipa_active_client_logging_info log_info;
+
+	IPA_RM_DBG_LOW("%s state: %d\n",
+			ipa_rm_resource_str(consumer->resource.name),
+			consumer->resource.state);
+
+	prev_state = consumer->resource.state;
+	consumer->resource.needed_bw += prod_needed_bw;
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		reinit_completion(&consumer->request_consumer_in_progress);
+		consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(consumer->resource.name));
+		if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
+			ipa_inc_client_enable_clks_no_block(&log_info) != 0) {
+			IPA_RM_DBG_LOW("async resume work for %s\n",
+				ipa_rm_resource_str(consumer->resource.name));
+			ipa_rm_wq_send_resume_cmd(consumer->resource.name,
+						prev_state,
+						prod_needed_bw,
+						inc_usage_count);
+			result = -EINPROGRESS;
+			break;
+		}
+		result = ipa_rm_resource_consumer_request_work(consumer,
+						prev_state,
+						prod_needed_bw,
+						false,
+						inc_usage_count);
+		break;
+	case IPA_RM_GRANTED:
+		if (wake_client) {
+			result = ipa_rm_resource_consumer_request_work(
+				consumer, prev_state, prod_needed_bw, false,
+				inc_usage_count);
+			break;
+		}
+		ipa_rm_perf_profile_change(consumer->resource.name);
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		break;
+	default:
+		consumer->resource.needed_bw -= prod_needed_bw;
+		result = -EPERM;
+		goto bail;
+	}
+	if (inc_usage_count)
+		consumer->usage_count++;
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+	return result;
+}
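+
+/*
+ * State handling above, summarized (illustrative):
+ *
+ *	RELEASED / RELEASE_IN_PROGRESS: move to REQUEST_IN_PROGRESS and
+ *		either call the driver CB inline (clocks voted without
+ *		blocking) or queue resume work and return -EINPROGRESS;
+ *	GRANTED: refresh the bandwidth vote, or re-run the request work
+ *		when wake_client is set;
+ *	REQUEST_IN_PROGRESS: return -EINPROGRESS.
+ */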
+
+int ipa_rm_resource_consumer_release(
+		struct ipa_rm_resource_cons *consumer,
+		u32 prod_needed_bw,
+		bool dec_usage_count)
+{
+	int result = 0;
+	enum ipa_rm_resource_state save_state;
+
+	IPA_RM_DBG_LOW("%s state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	save_state = consumer->resource.state;
+	consumer->resource.needed_bw -= prod_needed_bw;
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (dec_usage_count && consumer->usage_count > 0)
+			consumer->usage_count--;
+		if (consumer->usage_count == 0) {
+			consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+			if (save_state == IPA_RM_REQUEST_IN_PROGRESS ||
+			    ipa_suspend_resource_no_block(
+						consumer->resource.name) != 0) {
+				ipa_rm_wq_send_suspend_cmd(
+						consumer->resource.name,
+						save_state,
+						prod_needed_bw);
+				result = -EINPROGRESS;
+				goto bail;
+			}
+			result = ipa_rm_resource_consumer_release_work(consumer,
+					save_state, false);
+			goto bail;
+		} else if (consumer->resource.state == IPA_RM_GRANTED) {
+			ipa_rm_perf_profile_change(consumer->resource.name);
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (dec_usage_count && consumer->usage_count > 0)
+			consumer->usage_count--;
+		result = -EINPROGRESS;
+		break;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_notify_clients() - notify
+ *	all registered clients of given producer
+ * @producer: producer
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ */
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event,
+				bool notify_registered_only)
+{
+	struct ipa_rm_notification_info *reg_info;
+
+	IPA_RM_DBG_LOW("%s event: %d notify_registered_only: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		event,
+		notify_registered_only);
+
+	list_for_each_entry(reg_info, &(producer->event_listeners), link) {
+		if (notify_registered_only && !reg_info->explicit)
+			continue;
+
+		IPA_RM_DBG_LOW("Notifying %s event: %d\n",
+			   ipa_rm_resource_str(producer->resource.name), event);
+		reg_info->reg_params.notify_cb(reg_info->reg_params.user_data,
+					       event,
+					       0);
+		IPA_RM_DBG_LOW("back from client CB\n");
+	}
+}
+
+static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_prod **producer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+
+	*producer = kzalloc(sizeof(**producer), GFP_ATOMIC);
+	if (*producer == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	INIT_LIST_HEAD(&((*producer)->event_listeners));
+	result = ipa_rm_resource_producer_register(*producer,
+			&(create_params->reg_params),
+			false);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n");
+		goto register_fail;
+	}
+
+	(*resource) = (struct ipa_rm_resource *) (*producer);
+	(*resource)->type = IPA_RM_PRODUCER;
+	*max_peers = IPA_RM_RESOURCE_MAX;
+	goto bail;
+register_fail:
+	kfree(*producer);
+bail:
+	return result;
+}
+
+static void ipa_rm_resource_producer_delete(
+				struct ipa_rm_resource_prod *producer)
+{
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+
+	ipa_rm_resource_producer_release(producer);
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		list_del(pos);
+		kfree(reg_info);
+	}
+}
+
+static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_cons **consumer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+
+	*consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC);
+	if (*consumer == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	(*consumer)->request_resource = create_params->request_resource;
+	(*consumer)->release_resource = create_params->release_resource;
+	(*resource) = (struct ipa_rm_resource *) (*consumer);
+	(*resource)->type = IPA_RM_CONSUMER;
+	init_completion(&((*consumer)->request_consumer_in_progress));
+	*max_peers = IPA_RM_RESOURCE_MAX;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_create() - creates resource
+ * @create_params: [in] parameters needed
+ *			for resource initialization with IPA RM
+ * @resource: [out] created resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource)
+{
+	struct ipa_rm_resource_cons *consumer;
+	struct ipa_rm_resource_prod *producer;
+	int max_peers;
+	int result = 0;
+
+	if (!create_params) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
+		result = ipa_rm_resource_producer_create(resource,
+				&producer,
+				create_params,
+				&max_peers);
+		if (result) {
+			IPA_RM_ERR("ipa_rm_resource_producer_create failed\n");
+			goto bail;
+		}
+	} else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
+		result = ipa_rm_resource_consumer_create(resource,
+				&consumer,
+				create_params,
+				&max_peers);
+		if (result) {
+			IPA_RM_ERR("ipa_rm_resource_consumer_create failed\n");
+			goto bail;
+		}
+	} else {
+		IPA_RM_ERR("invalid resource\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	result = ipa_rm_peers_list_create(max_peers,
+			&((*resource)->peers_list));
+	if (result) {
+		IPA_RM_ERR("ipa_rm_peers_list_create failed\n");
+		goto peers_alloc_fail;
+	}
+	(*resource)->name = create_params->name;
+	(*resource)->floor_voltage = create_params->floor_voltage;
+	(*resource)->state = IPA_RM_RELEASED;
+	goto bail;
+
+peers_alloc_fail:
+	ipa_rm_resource_delete(*resource);
+bail:
+	return result;
+}
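+
+/*
+ * Minimal usage sketch (illustrative; the fields mirror how
+ * create_params is consumed above, the callbacks are hypothetical):
+ *
+ *	struct ipa_rm_create_params params = { 0 };
+ *	struct ipa_rm_resource *res;
+ *
+ *	params.name = IPA_RM_RESOURCE_USB_CONS;
+ *	params.request_resource = my_request_cb;
+ *	params.release_resource = my_release_cb;
+ *	if (ipa_rm_resource_create(&params, &res))
+ *		... handle the error ...
+ */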
+
+/**
+ * ipa_rm_resource_delete() - deletes resource
+ * @resource: [in] resource to be deleted
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource)
+{
+	struct ipa_rm_resource *consumer;
+	struct ipa_rm_resource *producer;
+	int peers_index;
+	int result = 0;
+	int list_size;
+	bool userspace_dep;
+
+	if (!resource) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("ENTER with resource %d\n", resource->name);
+	if (resource->type == IPA_RM_PRODUCER) {
+		if (resource->peers_list) {
+			list_size = ipa_rm_peers_list_get_size(
+				resource->peers_list);
+			for (peers_index = 0;
+				peers_index < list_size;
+				peers_index++) {
+				consumer = ipa_rm_peers_list_get_resource(
+						peers_index,
+						resource->peers_list);
+				if (consumer) {
+					userspace_dep =
+					ipa_rm_peers_list_get_userspace_dep(
+							peers_index,
+							resource->peers_list);
+					ipa_rm_resource_delete_dependency(
+						resource,
+						consumer,
+						userspace_dep);
+				}
+			}
+		}
+
+		ipa_rm_resource_producer_delete(
+				(struct ipa_rm_resource_prod *) resource);
+	} else if (resource->type == IPA_RM_CONSUMER) {
+		if (resource->peers_list) {
+			list_size = ipa_rm_peers_list_get_size(
+				resource->peers_list);
+			for (peers_index = 0;
+					peers_index < list_size;
+					peers_index++){
+				producer = ipa_rm_peers_list_get_resource(
+							peers_index,
+							resource->peers_list);
+				if (producer) {
+					userspace_dep =
+					ipa_rm_peers_list_get_userspace_dep(
+						peers_index,
+						resource->peers_list);
+					ipa_rm_resource_delete_dependency(
+							producer,
+							resource,
+							userspace_dep);
+				}
+			}
+		}
+	}
+	ipa_rm_peers_list_delete(resource->peers_list);
+	kfree(resource);
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_register() - register a client with a producer
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ * @explicit: [in] registered explicitly by ipa_rm_register()
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * A producer resource is expected for this call.
+ */
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params,
+		bool explicit)
+{
+	int result = 0;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos;
+
+	if (!producer || !reg_params) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	list_for_each(pos, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+					struct ipa_rm_notification_info,
+					link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			IPA_RM_ERR("already registered\n");
+			result = -EPERM;
+			goto bail;
+		}
+
+	}
+
+	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+	if (reg_info == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	reg_info->reg_params.user_data = reg_params->user_data;
+	reg_info->reg_params.notify_cb = reg_params->notify_cb;
+	reg_info->explicit = explicit;
+	INIT_LIST_HEAD(&reg_info->link);
+	list_add(&reg_info->link, &producer->event_listeners);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_deregister() - deregister a client from a producer
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * A producer resource is expected for this call.
+ * This function deletes only a single instance of
+ * the registration info.
+ */
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params)
+{
+	int result = -EINVAL;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+
+	if (!producer || !reg_params) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			list_del(pos);
+			kfree(reg_info);
+			result = 0;
+			goto bail;
+		}
+	}
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_add_dependency() - adds a dependency between two
+ *				given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] resource being depended on
+ * @userspace_dep: [in] true if the dependency is added by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep)
+{
+	int result = 0;
+	int consumer_result;
+	bool add_dep_by_userspace;
+
+	if (!resource || !depends_on) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name,
+			&add_dep_by_userspace)) {
+		IPA_RM_ERR("dependency already exists, added by %s\n",
+			add_dep_by_userspace ? "userspace" : "kernel");
+		return -EEXIST;
+	}
+
+	ipa_rm_peers_list_add_peer(resource->peers_list, depends_on,
+		userspace_dep);
+	ipa_rm_peers_list_add_peer(depends_on->peers_list, resource,
+		userspace_dep);
+	IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+				resource->state);
+
+	resource->needed_bw += depends_on->max_bw;
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+	{
+		enum ipa_rm_resource_state prev_state = resource->state;
+
+		resource->state = IPA_RM_REQUEST_IN_PROGRESS;
+		((struct ipa_rm_resource_prod *)
+					resource)->pending_request++;
+		consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)depends_on,
+				resource->max_bw,
+				true, false);
+		if (consumer_result != -EINPROGRESS) {
+			resource->state = prev_state;
+			((struct ipa_rm_resource_prod *)
+					resource)->pending_request--;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		result = consumer_result;
+		break;
+	}
+	default:
+		IPA_RM_ERR("invalid state\n");
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+					resource->state);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
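+
+/*
+ * Note (illustrative): the dependency is recorded in both peers lists,
+ * so it can later be torn down from whichever side is deleted first.
+ * A producer that is already GRANTED or REQUEST_IN_PROGRESS requests
+ * the new consumer immediately and may hand -EINPROGRESS back to the
+ * caller.
+ */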
+
+/**
+ * ipa_rm_resource_delete_dependency() - deletes a dependency between two
+ *				given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] resource being depended on
+ * @userspace_dep: [in] true if the dependency was added by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ * In case the resource state was changed, a notification
+ * will be sent to the RM client.
+ */
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep)
+{
+	int result = 0;
+	bool state_changed = false;
+	bool release_consumer = false;
+	enum ipa_rm_event evt;
+	bool add_dep_by_userspace;
+
+	if (!resource || !depends_on) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name,
+			&add_dep_by_userspace)) {
+		IPA_RM_ERR("dependency does not exist\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * to avoid race conditions between kernel and userspace
+	 * need to check that the dependency was added by same entity
+	 */
+	if (add_dep_by_userspace != userspace_dep) {
+		IPA_RM_DBG("dependency was added by %s\n",
+			add_dep_by_userspace ? "userspace" : "kernel");
+		IPA_RM_DBG("ignore request to delete dependency by %s\n",
+			userspace_dep ? "userspace" : "kernel");
+		return 0;
+	}
+
+	IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+				resource->state);
+
+	resource->needed_bw -= depends_on->max_bw;
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+		break;
+	case IPA_RM_GRANTED:
+		ipa_rm_perf_profile_change(resource->name);
+		release_consumer = true;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (((struct ipa_rm_resource_prod *)
+			resource)->pending_release > 0)
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_release--;
+		if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS &&
+			((struct ipa_rm_resource_prod *)
+			resource)->pending_release == 0) {
+			resource->state = IPA_RM_RELEASED;
+			state_changed = true;
+			evt = IPA_RM_RESOURCE_RELEASED;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		release_consumer = true;
+		if (((struct ipa_rm_resource_prod *)
+			resource)->pending_request > 0)
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_request--;
+		if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS &&
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_request == 0) {
+			resource->state = IPA_RM_GRANTED;
+			state_changed = true;
+			evt = IPA_RM_RESOURCE_GRANTED;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		break;
+	default:
+		result = -EINVAL;
+		goto bail;
+	}
+	if (state_changed) {
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+				resource->name,
+				evt,
+				false);
+	}
+	IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+					resource->state);
+	ipa_rm_peers_list_remove_peer(resource->peers_list,
+			depends_on->name);
+	ipa_rm_peers_list_remove_peer(depends_on->peers_list,
+			resource->name);
+	if (release_consumer)
+		(void) ipa_rm_resource_consumer_release(
+				(struct ipa_rm_resource_cons *)depends_on,
+				resource->max_bw,
+				true);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_request() - producer resource request
+ * @producer: [in] producer
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	enum ipa_rm_resource_state state;
+
+	state = producer->resource.state;
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		break;
+	case IPA_RM_GRANTED:
+		goto unlock_and_bail;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto unlock_and_bail;
+	default:
+		result = -EINVAL;
+		goto unlock_and_bail;
+	}
+
+	producer->pending_request = 0;
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			producer->pending_request++;
+			consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)consumer,
+				producer->resource.max_bw,
+				true, false);
+			if (consumer_result == -EINPROGRESS) {
+				result = -EINPROGRESS;
+			} else {
+				producer->pending_request--;
+				if (consumer_result != 0) {
+					result = consumer_result;
+					goto bail;
+				}
+			}
+		}
+	}
+
+	if (producer->pending_request == 0) {
+		producer->resource.state = IPA_RM_GRANTED;
+		ipa_rm_perf_profile_change(producer->resource.name);
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+			producer->resource.name,
+			IPA_RM_RESOURCE_GRANTED,
+			true);
+		result = 0;
+	}
+unlock_and_bail:
+	if (state != producer->resource.state)
+		IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+			ipa_rm_resource_str(producer->resource.name),
+			state,
+			producer->resource.state);
+bail:
+	return result;
+}
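+
+/*
+ * Accounting sketch (illustrative): pending_request counts consumers
+ * whose driver CBs have not completed yet. Every consumer answering
+ * -EINPROGRESS leaves the counter raised; the producer only becomes
+ * GRANTED, here or in ipa_rm_resource_producer_handle_cb(), once the
+ * counter drains back to zero.
+ */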
+
+/**
+ * ipa_rm_resource_producer_release() - producer resource release
+ * @producer: [in] producer resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	enum ipa_rm_resource_state state;
+
+	state = producer->resource.state;
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+		goto bail;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto bail;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+
+	producer->pending_release = 0;
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			producer->pending_release++;
+			consumer_result = ipa_rm_resource_consumer_release(
+				(struct ipa_rm_resource_cons *)consumer,
+				producer->resource.max_bw,
+				true);
+			producer->pending_release--;
+		}
+	}
+
+	if (producer->pending_release == 0) {
+		producer->resource.state = IPA_RM_RELEASED;
+		ipa_rm_perf_profile_change(producer->resource.name);
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+			producer->resource.name,
+			IPA_RM_RESOURCE_RELEASED,
+			true);
+	}
+bail:
+	if (state != producer->resource.state)
+		IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		state,
+		producer->resource.state);
+
+	return result;
+}
+
+static void ipa_rm_resource_producer_handle_cb(
+		struct ipa_rm_resource_prod *producer,
+		enum ipa_rm_event event)
+{
+	IPA_RM_DBG_LOW("%s state: %d event: %d pending_request: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		producer->resource.state,
+		event,
+		producer->pending_request);
+
+	switch (producer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_GRANTED)
+			goto unlock_and_bail;
+		if (producer->pending_request > 0) {
+			producer->pending_request--;
+			if (producer->pending_request == 0) {
+				producer->resource.state =
+						IPA_RM_GRANTED;
+				ipa_rm_perf_profile_change(
+					producer->resource.name);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_GRANTED,
+						false);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_RELEASED)
+			goto unlock_and_bail;
+		if (producer->pending_release > 0) {
+			producer->pending_release--;
+			if (producer->pending_release == 0) {
+				producer->resource.state =
+						IPA_RM_RELEASED;
+				ipa_rm_perf_profile_change(
+					producer->resource.name);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_RELEASED,
+						false);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto unlock_and_bail;
+	}
+unlock_and_bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		producer->resource.state);
+bail:
+	return;
+}
+
+/**
+ * ipa_rm_resource_consumer_handle_cb() - propagates a resource
+ *	notification to all dependent producers
+ * @consumer: [in] notifying consumer resource
+ * @event: [in] notified event
+ */
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event)
+{
+	int peers_index;
+	struct ipa_rm_resource *producer;
+
+	if (!consumer) {
+		IPA_RM_ERR("invalid params\n");
+		return;
+	}
+	IPA_RM_DBG_LOW("%s state: %d event: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state,
+		event);
+
+	switch (consumer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_RELEASED)
+			goto bail;
+		consumer->resource.state = IPA_RM_GRANTED;
+		ipa_rm_perf_profile_change(consumer->resource.name);
+		ipa_resume_resource(consumer->resource.name);
+		complete_all(&consumer->request_consumer_in_progress);
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_GRANTED)
+			goto bail;
+		consumer->resource.state = IPA_RM_RELEASED;
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto bail;
+	}
+
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				consumer->resource.peers_list);
+		peers_index++) {
+		producer = ipa_rm_peers_list_get_resource(peers_index,
+				consumer->resource.peers_list);
+		if (producer)
+			ipa_rm_resource_producer_handle_cb(
+					(struct ipa_rm_resource_prod *)
+						producer,
+						event);
+	}
+
+	return;
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+}
+
+/*
+ * ipa_rm_resource_set_perf_profile() - sets the performance profile of
+ *					a resource.
+ *
+ * @resource: [in] resource
+ * @profile: [in] profile to be set
+ *
+ * Sets the profile of the given resource. In case the resource is
+ * granted, the bandwidth vote of the resource is updated as well.
+ */
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+				     struct ipa_rm_perf_profile *profile)
+{
+	int peers_index;
+	struct ipa_rm_resource *peer;
+
+	if (!resource || !profile) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (profile->max_supported_bandwidth_mbps == resource->max_bw) {
+		IPA_RM_DBG_LOW("same profile\n");
+		return 0;
+	}
+
+	if ((resource->type == IPA_RM_PRODUCER &&
+	    (resource->state == IPA_RM_GRANTED ||
+	    resource->state == IPA_RM_REQUEST_IN_PROGRESS)) ||
+	    resource->type == IPA_RM_CONSUMER) {
+		for (peers_index = 0;
+		     peers_index < ipa_rm_peers_list_get_size(
+		     resource->peers_list);
+		     peers_index++) {
+			peer = ipa_rm_peers_list_get_resource(peers_index,
+				resource->peers_list);
+			if (!peer)
+				continue;
+			peer->needed_bw -= resource->max_bw;
+			peer->needed_bw +=
+				profile->max_supported_bandwidth_mbps;
+			if (peer->state == IPA_RM_GRANTED)
+				ipa_rm_perf_profile_change(peer->name);
+		}
+	}
+
+	resource->max_bw = profile->max_supported_bandwidth_mbps;
+	if (resource->state == IPA_RM_GRANTED)
+		ipa_rm_perf_profile_change(resource->name);
+
+	return 0;
+}
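+
+/*
+ * Worked example (illustrative): a peer with needed_bw = 100 whose
+ * neighbor moves from max_bw = 40 to a 60 Mbps profile ends up with
+ * needed_bw = 100 - 40 + 60 = 120, and its bandwidth vote is refreshed
+ * immediately if that peer is already GRANTED.
+ */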
+
+
+/*
+ * ipa_rm_resource_producer_print_stat() - prints the
+ * resource status and all of its dependencies
+ *
+ * @resource: [in] resource to print
+ * @buf: [in] the buffer to print into
+ * @size: [in] buffer size
+ *
+ * Returns: number of bytes used on success, negative on failure
+ */
+int ipa_rm_resource_producer_print_stat(
+				struct ipa_rm_resource *resource,
+				char *buf,
+				int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	struct ipa_rm_resource *consumer;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+		ipa_rm_resource_str(resource->name));
+	cnt += nbytes;
+	nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", resource->max_bw);
+	cnt += nbytes;
+
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Released] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Request In Progress] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_GRANTED:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Granted] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Release In Progress] -> ");
+		cnt += nbytes;
+		break;
+	default:
+		return -EPERM;
+	}
+
+	for (i = 0; i < resource->peers_list->max_peers; ++i) {
+		consumer =
+			ipa_rm_peers_list_get_resource(
+			i,
+			resource->peers_list);
+		if (consumer) {
+			nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+				ipa_rm_resource_str(consumer->name));
+			cnt += nbytes;
+			nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ",
+				consumer->max_bw);
+			cnt += nbytes;
+
+			switch (consumer->state) {
+			case IPA_RM_RELEASED:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+					"Released], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_REQUEST_IN_PROGRESS:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Request In Progress], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_GRANTED:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Granted], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_RELEASE_IN_PROGRESS:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Release In Progress], ");
+				cnt += nbytes;
+				break;
+			default:
+				return -EPERM;
+			}
+		}
+	}
+	nbytes = scnprintf(buf + cnt, size - cnt, "\n");
+	cnt += nbytes;
+
+	return cnt;
+}
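+
+/*
+ * Example output line (illustrative, following the format strings
+ * above; the exact names come from ipa_rm_resource_str()):
+ *
+ *	USB_PROD[1000, Granted] -> USB_CONS[1000, Granted],
+ */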

+ 159 - 0
ipa/ipa_rm_resource.h

@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_RM_RESOURCE_H_
+#define _IPA_RM_RESOURCE_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_peers_list.h"
+
+/**
+ * enum ipa_rm_resource_state - resource state
+ */
+enum ipa_rm_resource_state {
+	IPA_RM_RELEASED,
+	IPA_RM_REQUEST_IN_PROGRESS,
+	IPA_RM_GRANTED,
+	IPA_RM_RELEASE_IN_PROGRESS
+};
+
+/**
+ * enum ipa_rm_resource_type - IPA resource manager resource type
+ */
+enum ipa_rm_resource_type {
+	IPA_RM_PRODUCER,
+	IPA_RM_CONSUMER
+};
+
+/**
+ * struct ipa_rm_notification_info - notification information
+ *				of IPA RM client
+ * @reg_params: registration parameters
+ * @explicit: registered explicitly by ipa_rm_register()
+ * @link: link to the list of all registered clients information
+ */
+struct ipa_rm_notification_info {
+	struct ipa_rm_register_params	reg_params;
+	bool				explicit;
+	struct list_head		link;
+};
+
+/**
+ * struct ipa_rm_resource - IPA RM resource
+ * @name: name identifying resource
+ * @type: type of resource (PRODUCER or CONSUMER)
+ * @floor_voltage: minimum voltage level for operation
+ * @max_bw: maximum bandwidth required for resource in Mbps
+ * @needed_bw: bandwidth currently needed by the resource in Mbps
+ * @state: state of the resource
+ * @peers_list: list of the peers of the resource
+ */
+struct ipa_rm_resource {
+	enum ipa_rm_resource_name	name;
+	enum ipa_rm_resource_type	type;
+	enum ipa_voltage_level		floor_voltage;
+	u32				max_bw;
+	u32				needed_bw;
+	enum ipa_rm_resource_state	state;
+	struct ipa_rm_peers_list	*peers_list;
+};
+
+/**
+ * struct ipa_rm_resource_cons - IPA RM consumer
+ * @resource: resource
+ * @usage_count: number of producers in GRANTED / REQUESTED state
+ *		using this consumer
+ * @request_consumer_in_progress: completion that is pending while the
+ *		consumer is in the middle of its request phase
+ * @request_resource: function which should be called to request the
+ *			resource from the resource manager
+ * @release_resource: function which should be called to release the
+ *			resource to the resource manager
+ *
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_cons {
+	struct ipa_rm_resource resource;
+	int usage_count;
+	struct completion request_consumer_in_progress;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_resource_prod - IPA RM producer
+ * @resource: resource
+ * @event_listeners: list of clients registered with this producer
+ *		for notifications on resource state changes
+ * @pending_request: number of consumer requests still in progress
+ * @pending_release: number of consumer releases still in progress
+ *
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_prod {
+	struct ipa_rm_resource	resource;
+	struct list_head	event_listeners;
+	int			pending_request;
+	int			pending_release;
+};
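+
+/*
+ * Layout note (illustrative): ipa_rm_resource.c casts between
+ * struct ipa_rm_resource * and the _prod/_cons wrappers, e.g.
+ * (struct ipa_rm_resource_cons *)depends_on. That is only safe while
+ * @resource stays the first member of both wrappers, which is why new
+ * fields must be added after it.
+ */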
+
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource);
+
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource);
+
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params,
+				bool explicit);
+
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep);
+
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				      struct ipa_rm_resource *depends_on,
+				      bool userspace_dep);
+
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer,
+				u32 needed_bw,
+				bool inc_usage_count,
+				bool wake_client);
+
+int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer,
+				u32 needed_bw,
+				bool dec_usage_count);
+
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+				     struct ipa_rm_perf_profile *profile);
+
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event);
+
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event,
+				bool notify_registered_only);
+
+int ipa_rm_resource_producer_print_stat(
+		struct ipa_rm_resource *resource,
+		char *buf,
+		int size);
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool notify_completion,
+		bool dec_client_on_err);
+
+int ipa_rm_resource_consumer_release_work(
+		struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		bool notify_completion);
+
+#endif /* _IPA_RM_RESOURCE_H_ */

+ 22 - 0
ipa/ipa_uc_offload_common_i.h

@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl,
+	struct ipa_ntn_conn_in_params *params);
+
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+			      void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */

+ 4 - 0
ipa/ipa_v3/Makefile

@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipampmm.o
+ipampmm-objs := ipa_mpm.o

+ 2392 - 0
ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h

@@ -0,0 +1,2392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_H_)
+#define _GSI_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#define GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00004000)
+#define GSI_REG_BASE_OFFS 0x00004000
+#define HWIO_GSI_CFG_ADDR (GSI_REG_BASE + 0x00000000)
+#define HWIO_GSI_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000000)
+#define HWIO_GSI_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000000)
+#define HWIO_GSI_CFG_RMSK 0xf3f
+#define HWIO_GSI_CFG_ATTR 0x3
+#define HWIO_GSI_CFG_IN in_dword_masked(HWIO_GSI_CFG_ADDR, \
+					HWIO_GSI_CFG_RMSK)
+#define HWIO_GSI_CFG_INM(m) in_dword_masked(HWIO_GSI_CFG_ADDR, m)
+#define HWIO_GSI_CFG_OUT(v) out_dword(HWIO_GSI_CFG_ADDR, v)
+#define HWIO_GSI_CFG_OUTM(m, v) out_dword_masked_ns(HWIO_GSI_CFG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_GSI_CFG_IN)
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_SHFT 0x0
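+/*
+ * Usage sketch (illustrative): the _BMSK/_SHFT pairs combine with the
+ * _OUTM accessor for a read-modify-write of a single field, e.g.
+ * setting the GSI_ENABLE bit:
+ *
+ *	HWIO_GSI_CFG_OUTM(HWIO_GSI_CFG_GSI_ENABLE_BMSK,
+ *		1 << HWIO_GSI_CFG_GSI_ENABLE_SHFT);
+ */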
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_ADDR (GSI_REG_BASE + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_PHYS (GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_OFFS (GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_GSI_ZEROS_ADDR (GSI_REG_BASE + 0x00000010)
+#define HWIO_GSI_ZEROS_PHYS (GSI_REG_BASE_PHYS + 0x00000010)
+#define HWIO_GSI_ZEROS_OFFS (GSI_REG_BASE_OFFS + 0x00000010)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_ADDR (GSI_REG_BASE + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_PHYS (GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_OFFS (GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_ADDR (GSI_REG_BASE + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_PHYS (GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_OFFS (GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_GSI_PERIPH_PENDING_ADDR (GSI_REG_BASE + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_GSI_MOQA_CFG_ADDR (GSI_REG_BASE + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000030)
+#define HWIO_GSI_REE_CFG_ADDR (GSI_REG_BASE + 0x00000038)
+#define HWIO_GSI_REE_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000038)
+#define HWIO_GSI_REE_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000038)
+#define HWIO_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_GSI_REE_CFG_ATTR 0x3
+#define HWIO_GSI_REE_CFG_IN in_dword_masked(HWIO_GSI_REE_CFG_ADDR, \
+					    HWIO_GSI_REE_CFG_RMSK)
+#define HWIO_GSI_REE_CFG_INM(m) in_dword_masked(HWIO_GSI_REE_CFG_ADDR, m)
+#define HWIO_GSI_REE_CFG_OUT(v) out_dword(HWIO_GSI_REE_CFG_ADDR, v)
+#define HWIO_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_REE_CFG_IN)
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_GSI_CGC_CTRL_ADDR (GSI_REG_BASE + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_PHYS (GSI_REG_BASE_PHYS + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_OFFS (GSI_REG_BASE_OFFS + 0x00000060)
+#define HWIO_GSI_MSI_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_GSI_EVENT_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_GSI_DATA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_GSI_TRE_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a4)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000bc)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c4)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000dc)
+#define HWIO_IC_READ_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR (GSI_REG_BASE + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR (GSI_REG_BASE + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f4)
+#define HWIO_IC_INT_WEIGHT_REE_ADDR (GSI_REG_BASE + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_PHYS (GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_OFFS (GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_ADDR (GSI_REG_BASE + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_ADDR (GSI_REG_BASE + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_CSR_ADDR (GSI_REG_BASE + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_PHYS (GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_OFFS (GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_ADDR (GSI_REG_BASE + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_ADDR (GSI_REG_BASE + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_ADDR (GSI_REG_BASE + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_ADDR (GSI_REG_BASE + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_PHYS (GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_OFFS (GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR (GSI_REG_BASE + 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_SDMA_ADDR (GSI_REG_BASE + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_PHYS (GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_OFFS (GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_GSI_SDMA_CFG_ADDR (GSI_REG_BASE + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_GSI_SDMA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) (GSI_REG_BASE + 0x00000140 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) (GSI_REG_BASE + 0x00000144 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_ADDR(n) (GSI_REG_BASE + 0x00000300 + \
+					   0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							  0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000264)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_ADDR (GSI_REG_BASE + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_PHYS (GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_OFFS (GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR (GSI_REG_BASE + 0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR (GSI_REG_BASE + 0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_CH_DB_ADDR (GSI_REG_BASE + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_PHYS (GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_OFFS (GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_EV_DB_ADDR (GSI_REG_BASE + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_PHYS (GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_OFFS (GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_ADDR (GSI_REG_BASE + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_PHYS (GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_OFFS (GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_ADDR (GSI_REG_BASE + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_PHYS (GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_OFFS (GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_ADDR (GSI_REG_BASE + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_PHYS (GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_OFFS (GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR (GSI_REG_BASE + 0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (GSI_REG_BASE + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (GSI_REG_BASE + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (GSI_REG_BASE + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR (GSI_REG_BASE + 0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS (GSI_REG_BASE_PHYS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS (GSI_REG_BASE_OFFS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_ADDR (GSI_REG_BASE + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_PHYS (GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_OFFS (GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR (GSI_REG_BASE + 0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS (GSI_REG_BASE_PHYS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS (GSI_REG_BASE_OFFS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) (GSI_REG_BASE + 0x00000450 + \
+					      0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_ADDR(n) (GSI_REG_BASE + 0x0001b000 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_SHRAM_n_ADDR(n) (GSI_REG_BASE + 0x00002000 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_MAXn 1343
+#define HWIO_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_GSI_SHRAM_n_INI(n) in_dword_masked(HWIO_GSI_SHRAM_n_ADDR( \
+							n), \
+						HWIO_GSI_SHRAM_n_RMSK)
+#define HWIO_GSI_SHRAM_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_GSI_SHRAM_n_OUTI(n, val) out_dword(HWIO_GSI_SHRAM_n_ADDR( \
+							n), val)
+#define HWIO_GSI_SHRAM_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_GSI_SHRAM_n_INI(n))
+#define HWIO_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_SHRAM_SHFT 0x0
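+/*
+ * Note on the HWIO accessor convention used throughout this file: each
+ * register FOO gets _ADDR/_PHYS/_OFFS address macros, an _RMSK full-register
+ * mask, _MAXn/_MAXk bounds for indexed arrays, an _ATTR read/write attribute,
+ * and _INI/_INMI/_OUTI/_OUTMI wrappers around in_dword_masked()/out_dword()
+ * (supplied by the HWIO support headers elsewhere). Individual fields carry
+ * _BMSK/_SHFT pairs. A minimal read sketch, assuming the iomem mapping
+ * behind GSI_REG_BASE is already set up:
+ *
+ *	u32 word = HWIO_GSI_SHRAM_n_INI(0);	// masked read of SHRAM word 0
+ *	u32 shram = (word & HWIO_GSI_SHRAM_n_SHRAM_BMSK) >>
+ *		    HWIO_GSI_SHRAM_n_SHRAM_SHFT;
+ */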
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k, val) out_dword( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		val)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+					       val) out_dword_masked_ns( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n,	\
+						     k), \
+		mask, \
+		val, \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
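+/*
+ * Illustrative sketch (not part of the generated definitions): decoding one
+ * virtual-to-physical channel mapping entry for EE n, channel k, with the
+ * field macros above. The entry is a VALID bit over a 5-bit physical
+ * channel number; ee and ch are assumed to be within MAXn/MAXk.
+ *
+ *	u32 vp = HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(ee, ch);
+ *	bool valid = !!(vp & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK);
+ *	u32 phys_ch = (vp & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK) >>
+ *		      HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT;
+ */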
+#define HWIO_GSI_TEST_BUS_SEL_ADDR (GSI_REG_BASE + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		HWIO_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_SEL_OUT(v) out_dword(HWIO_GSI_TEST_BUS_SEL_ADDR, \
+					       v)
+#define HWIO_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_TEST_BUS_SEL_IN)
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL 0x33
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL 0x34
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL 0x35
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL 0x36
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_1_FVAL 0x3d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
+#define HWIO_GSI_TEST_BUS_REG_ADDR (GSI_REG_BASE + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		HWIO_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
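+/*
+ * Hedged usage sketch: the debug test bus is presumably driven by first
+ * programming a source into GSI_TEST_BUS_SEL, then sampling the read-only
+ * GSI_TEST_BUS_REG (ATTR 0x1). For example, to sample the MCS_0 source:
+ *
+ *	HWIO_GSI_TEST_BUS_SEL_OUTM(
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK,
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL <<
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT);
+ *	u32 sample = HWIO_GSI_TEST_BUS_REG_IN;
+ */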
+#define HWIO_GSI_DEBUG_BUSY_REG_ADDR (GSI_REG_BASE + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_ADDR (GSI_REG_BASE + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_ADDR (GSI_REG_BASE + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_ADDR (GSI_REG_BASE + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_ADDR(n) (GSI_REG_BASE + 0x00001200 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_ADDR(n) (GSI_REG_BASE + 0x00001240 + 0x4 * \
+					 (n))
+#define HWIO_GSI_DEBUG_COUNTERn_PHYS(n) (GSI_REG_BASE_PHYS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_OFFS(n) (GSI_REG_BASE_OFFS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked(	\
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
+#define HWIO_GSI_DEBUG_PC_FROM_SW_ADDR (GSI_REG_BASE + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_PHYS (GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_OFFS (GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_GSI_DEBUG_SW_STALL_ADDR (GSI_REG_BASE + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_PHYS (GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_OFFS (GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_ADDR (GSI_REG_BASE + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_PHYS (GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_OFFS (GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_ADDR (GSI_REG_BASE + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_ADDR (GSI_REG_BASE + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR (GSI_REG_BASE + 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_ADDR (GSI_REG_BASE + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_PHYS (GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_OFFS (GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_ADDR (GSI_REG_BASE + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_PHYS (GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_OFFS (GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_ADDR (GSI_REG_BASE + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_PHYS (GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_OFFS (GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) (GSI_REG_BASE + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) (GSI_REG_BASE_PHYS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) (GSI_REG_BASE_OFFS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) (GSI_REG_BASE + 0x00001080 + \
+					      0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n) (GSI_REG_BASE + 0x00001100 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+						mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+						       k), \
+		mask)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) (GSI_REG_BASE + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_UC_SRC_IRQ_ADDR (GSI_REG_BASE + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_PHYS (GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_OFFS (GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_ADDR (GSI_REG_BASE + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_PHYS (GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_OFFS (GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_ADDR (GSI_REG_BASE + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_GSI_ACC_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x0000050c + 0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ROUTINE_ADDR (GSI_REG_BASE + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_GSI_ACC_GO_ADDR (GSI_REG_BASE + 0x00000528)
+#define HWIO_GSI_ACC_GO_PHYS (GSI_REG_BASE_PHYS + 0x00000528)
+#define HWIO_GSI_ACC_GO_OFFS (GSI_REG_BASE_OFFS + 0x00000528)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_ADDR (GSI_REG_BASE + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_PHYS (GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_OFFS (GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR (GSI_REG_BASE + 0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR (GSI_REG_BASE + 0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_ADDR (GSI_REG_BASE + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_PC_ADDR (GSI_REG_BASE + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_PHYS (GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_OFFS (GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000540 + \
+					     0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_VLD_ADDR (GSI_REG_BASE + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_ADDR (GSI_REG_BASE + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000560 + \
+					   0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x0000f000 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL 0x3
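+/*
+ * Sketch, assuming ee/ch are within MAXn/MAXk: reading the channel state
+ * machine value out of CNTXT_0 and comparing it against the CHSTATE
+ * enumerations above.
+ *
+ *	u32 c0 = HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(ee, ch);
+ *	u32 state = (c0 & HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		    HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *	if (state == HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL)
+ *		handle_channel_error(ee, ch);	// hypothetical helper
+ */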
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x0000f004 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x0000f008 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0000f00c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x0000f010 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x0000f014 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x0000f018 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0000f01c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k) (GSI_REG_BASE +	\
+							 0x0000f054 + \
+							 0x4000 * (n) +	\
+							 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+						  k) (GSI_REG_BASE_PHYS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+						  k) (GSI_REG_BASE_OFFS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+						   mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							  k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, k, mask,	\
+						    val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR( \
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							  0x0000f058 + \
+							  0x4000 * (n) + \
+							  0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+						   k) (GSI_REG_BASE_PHYS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+						   k) (GSI_REG_BASE_OFFS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k) (GSI_REG_BASE + 0x0000f05c + \
+					   0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_EE_n_GSI_CH_k_QOS_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_USE_PREFETCH_BUFS_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
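+/*
+ * Sketch, assuming the channel is not yet started (when QOS programming is
+ * typically done): setting the WRR arbitration weight without disturbing
+ * the other QOS fields, via the read-modify-write OUTMI2 wrapper. weight
+ * is a caller-supplied value.
+ *
+ *	HWIO_EE_n_GSI_CH_k_QOS_OUTMI2(ee, ch,
+ *		HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK,
+ *		weight << HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT);
+ */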
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
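+/*
+ * The four channel SCRATCH words above are opaque to the GSI hardware;
+ * their layout is defined per channel protocol by software (see the
+ * gsi_channel_scratch unions in gsi/gsi.h in this techpack).
+ */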
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							0x0000f070 + \
+							0x4000 * (n) + \
+							0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n, \
+						 k) (GSI_REG_BASE_PHYS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n, \
+						 k) (GSI_REG_BASE_OFFS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x00010000 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x00010004 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x00010008 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0001000c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x00010010 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x00010014 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x00010018 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0001001c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k) (GSI_REG_BASE + 0x00010020 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
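+/*
+ * Sketch: the interrupt moderation timer (INT_MODT) and counter (INT_MODC)
+ * share CNTXT_8, so both can be programmed in one read-modify-write. A
+ * hedged example with modt/modc as caller-supplied values:
+ *
+ *	HWIO_EE_n_EV_CH_k_CNTXT_8_OUTMI2(ee, ev,
+ *		HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK |
+ *		HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK,
+ *		(modt << HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) |
+ *		(modc << HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT));
+ */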
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k) (GSI_REG_BASE + 0x00010024 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k) (GSI_REG_BASE + 0x00010028 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k) (GSI_REG_BASE + 0x0001002c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k) (GSI_REG_BASE + 0x00010030 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k) (GSI_REG_BASE + 0x00010034 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_STATUS_ADDR(n) (GSI_REG_BASE + 0x00012000 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_STATUS_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_EE_n_GSI_STATUS_RMSK)
+#define HWIO_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012008 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012010 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) (GSI_REG_BASE + 0x00012018 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_ADDR(n) (GSI_REG_BASE + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_ADDR(n) (GSI_REG_BASE + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_ADDR(n) (GSI_REG_BASE + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_ADDR(n) (GSI_REG_BASE + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_ADDR(n) (GSI_REG_BASE + 0x00012048 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_ADDR(n) (GSI_REG_BASE + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) (GSI_REG_BASE + 0x00012088 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012094 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) (GSI_REG_BASE + 0x000120b0 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask,	\
+					       val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012100 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012108 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012110 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012118 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012120 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012128 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_ADDR(n) (GSI_REG_BASE + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		HWIO_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		val)
+#define HWIO_EE_n_CNTXT_INTSET_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) (GSI_REG_BASE + 0x00012188 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) (GSI_REG_BASE + 0x0001218c + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INT_VEC_ADDR(n) (GSI_REG_BASE + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_ADDR(n) (GSI_REG_BASE + 0x00012200 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_ERROR_LOG_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_EE_n_ERROR_LOG_INI(n) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_EE_n_ERROR_LOG_RMSK)
+#define HWIO_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_EE_n_ERROR_LOG_OUTI(n, val) out_dword( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_ERROR_LOG_INI(n))
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_EE_n_ERROR_LOG_CLR_ADDR(n) (GSI_REG_BASE + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword(	\
+		HWIO_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n) (GSI_REG_BASE + 0x00012400 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n) (GSI_REG_BASE + 0x00012404 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_GSI_MCS_CFG_ADDR (GSI_REG_BASE + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000b000)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b018)
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_ADDR(n) (GSI_REG_BASE + 0x0000c000 + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c008 +	\
+					    0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c010 + \
+					   0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x0000c01c + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#endif
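
The accessor macros above all share one masked read/write convention: *_INI/_INI2 read a register through its full reset mask (*_RMSK), *_INMI/_INMI2 read through a caller-supplied mask, *_OUTI/_OUTI2 write the whole dword, and *_OUTMI/_OUTMI2 do a read-modify-write that preserves bits outside the mask. A minimal usage sketch (editorial, not part of the snapshot), assuming in_dword_masked() and out_dword() are the usual ioread32/iowrite32-based HWIO helpers defined elsewhere in this tree:

	/* Hypothetical example: unmask only the channel-control and
	 * IEOB interrupt types for execution environment 0.
	 */
	static void gsi_unmask_ch_and_ieob_irq_types(void)
	{
		u32 msk;

		/* Read the current IRQ-type mask through its reset mask. */
		msk = HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(0);

		/* Set the CH_CTRL and IEOB type bits; leave the rest as-is. */
		msk |= HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK |
		       HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK;

		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(0, msk);
	}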

+ 530 - 0
ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h

@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_DEF_H_)
+#define _GSI_HWIO_DEF_H_
+struct gsi_hwio_def_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union gsi_hwio_def_gsi_cfg_u {
+	struct gsi_hwio_def_gsi_cfg_s	def;
+	u32				value;
+};
+struct gsi_hwio_def_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union gsi_hwio_def_gsi_ree_cfg_u {
+	struct gsi_hwio_def_gsi_ree_cfg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union gsi_hwio_def_gsi_manager_ee_qos_n_u {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union gsi_hwio_def_gsi_shram_n_u {
+	struct gsi_hwio_def_gsi_shram_n_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_u {
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union gsi_hwio_def_gsi_test_bus_sel_u {
+	struct gsi_hwio_def_gsi_test_bus_sel_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union gsi_hwio_def_gsi_test_bus_reg_u {
+	struct gsi_hwio_def_gsi_test_bus_reg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_gsi_debug_countern_u {
+	struct gsi_hwio_def_gsi_debug_countern_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_s {
+	u32 addr_20_0 : 21;
+	u32 write : 1;
+	u32 tid : 5;
+	u32 mid : 5;
+};
+union gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_u {
+	struct gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_s   def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union gsi_hwio_def_gsi_debug_sw_rf_n_read_u {
+	struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_qos_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_gsi_status_u {
+	struct gsi_hwio_def_ee_n_gsi_status_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_glob_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_cntxt_intset_u {
+	struct gsi_hwio_def_ee_n_cntxt_intset_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_lsb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_msb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union gsi_hwio_def_ee_n_error_log_u {
+	struct gsi_hwio_def_ee_n_error_log_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union gsi_hwio_def_ee_n_error_log_clr_u {
+	struct gsi_hwio_def_ee_n_error_log_clr_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_0_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_1_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s	def;
+	u32						value;
+};
+#endif
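
Each *_s struct in this header is a bitfield overlay for the raw 32-bit value of the matching register in gsi_hwio.h, and each *_u union lets code move between the packed dword and the decoded fields. A short decode sketch (editorial, not part of the snapshot), assuming the HWIO_EE_n_GSI_STATUS_INI() accessor from gsi_hwio.h:

	/* Hypothetical example: read EE_n_GSI_STATUS and decode it
	 * through the bitfield overlay instead of manual shift/mask.
	 */
	static bool gsi_ee_is_enabled(u32 ee)
	{
		union gsi_hwio_def_ee_n_gsi_status_u stts;

		stts.value = HWIO_EE_n_GSI_STATUS_INI(ee);

		return stts.def.enabled != 0;
	}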

+ 42 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h

@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_ACCESS_CONTROL_H_)
+#define _IPA_ACCESS_CONTROL_H_
+
+#include "ipa_reg_dump.h"
+
+/*
+ * The following is target specific.
+ */
+static struct reg_mem_access_map_t mem_access_map[] = {
+	/*------------------------------------------------------------*/
+	/*      Range               Use when              Use when    */
+	/*  Begin    End           SD_ENABLED           SD_DISABLED   */
+	/*------------------------------------------------------------*/
+	{ 0x04000, 0x05000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1F000, 0x27000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x05000, 0x0f000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x0f000, 0x10000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x13000, 0x17000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x17000, 0x1b000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1b000, 0x1f000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x10000, 0x11000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x11000, 0x12000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x12000, 0x13000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x43000, 0x44000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x44000, 0x45000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x45000, 0x47000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x40000, 0x42000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x42000, 0x43000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x50000, 0x60000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x60000, 0x80000, { &io_matrix[AN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x80000, 0x81000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x81000, 0x83000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xa0000, 0xc0000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xc0000, 0xc2000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0xc2000, 0xd0000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+};
+
+#endif /* #if !defined(_IPA_ACCESS_CONTROL_H_) */
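
mem_access_map[] pairs IPA register-offset ranges with a pointer into io_matrix[] per secure-debug state: the first column is consulted when SD is enabled, the second when it is disabled. A hedged sketch (editorial, not part of the snapshot) of the lookup the dump code presumably performs; the field names begin, end, and access, and the io_matrix element type, are illustrative assumptions, since struct reg_mem_access_map_t is declared in ipa_reg_dump.h:

	/* Hypothetical example; field and type names are assumptions. */
	static const struct io_matrix *access_matrix_for(u32 offs, bool sd_enabled)
	{
		u32 i;

		for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
			if (offs >= mem_access_map[i].begin &&
			    offs < mem_access_map[i].end)
				return mem_access_map[i].access[sd_enabled ? 0 : 1];
		}

		return NULL; /* offset not covered by any range */
	}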

+ 12 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_H_)
+#define _IPA_GCC_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#endif

+ 7 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_DEF_H_)
+#define _IPA_GCC_HWIO_DEF_H_
+#endif

+ 593 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h

@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HW_COMMON_EX_H_)
+#define _IPA_HW_COMMON_EX_H_
+
+/* VLVL defs are available for 854 */
+#define FEATURE_VLVL_DEFS                            true
+
+#define FEATURE_IPA_HW_VERSION_4_5                   true
+
+/* Important Platform Specific Values : IRQ_NUM, IRQ_CNT, BCR */
+#define IPA_HW_BAM_IRQ_NUM                           639
+
+/* Q6 IRQ number for IPA. */
+#define IPA_HW_IRQ_NUM                               640
+
+/* Total number of different interrupts that can be enabled */
+#define IPA_HW_IRQ_CNT_TOTAL                         23
+
+/* IPAv4 spare reg value */
+#define IPA_HW_SPARE_1_REG_VAL                       0xC0000005
+
+/* Whether to allow setting step mode on IPA when we crash or not */
+#define IPA_CFG_HW_IS_STEP_MODE_ALLOWED              (false)
+
+/* GSI MHI related definitions */
+#define IPA_HW_GSI_MHI_CONSUMER_CHANNEL_NUM          0x0
+#define IPA_HW_GSI_MHI_PRODUCER_CHANNEL_NUM          0x1
+
+#define IPA_HW_GSI_MHI_CONSUMER_EP_NUM               0x1
+#define IPA_HW_GSI_MHI_PRODUCER_EP_NUM               0x11
+
+/* IPA ZIP WA related Macros */
+#define IPA_HW_DCMP_SRC_PIPE                         0x8
+#define IPA_HW_DCMP_DEST_PIPE                        0x4
+#define IPA_HW_ACK_MNGR_MASK                         0x1D
+#define IPA_HW_DCMP_SRC_GRP                          0x5
+
+/* IPA Clock resource name */
+#define IPA_CLK_RESOURCE_NAME                        "/clk/pcnoc"
+
+/* IPA Clock Bus Client name */
+#define IPA_CLK_BUS_CLIENT_NAME                      "IPA_PCNOC_BUS_CLIENT"
+
+/* HPS Sequences */
+#define IPA_HW_PKT_PROCESS_HPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_CIPHE         0x1
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_UCP    0x2
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP       0x3
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH      0x4
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH         0x5
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_NO_UCP 0x6
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP    0x7
+#define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER               0x8
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER        0x9
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_NO_DECIPH  0xA
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_DECIPH     0xB
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_NO_DECIPH  0xC
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_DECIPH     0xD
+
+/* DPS Sequences */
+#define IPA_HW_PKT_PROCESS_DPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH          0x1
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP          0x2
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_CIPH            0x3
+
+/* Src RSRC GRP config */
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0           0x0B040803
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1           0x0C0C0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2           0x0E0E0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4           0x10101616
+
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0           0x01010101
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1           0x02020202
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2           0x04040404
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4           0x02020606
+
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_1           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_2           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_3           0x00003F00
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_4           0x00000000
+
+/* Dest RSRC GRP config */
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0           0x05051010
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1           0x3F013F02
+
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0           0x02020202
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1           0x02010201
+
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_1           0x00000200
+
+#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0            0x03030303
+#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0            0x03030303
+
+#define IPA_HW_RSRP_GRP_0                            0x0
+#define IPA_HW_RSRP_GRP_1                            0x1
+#define IPA_HW_RSRP_GRP_2                            0x2
+#define IPA_HW_RSRP_GRP_3                            0x3
+
+#define IPA_HW_PCIE_SRC_RSRP_GRP                     IPA_HW_RSRP_GRP_0
+#define IPA_HW_PCIE_DEST_RSRP_GRP                    IPA_HW_RSRP_GRP_0
+
+#define IPA_HW_DDR_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_1
+#define IPA_HW_DDR_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_1
+
+#define IPA_HW_DMA_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_2
+#define IPA_HW_DMA_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_2
+
+#define IPA_HW_SRC_RSRP_TYPE_MAX 0x05
+#define IPA_HW_DST_RSRP_TYPE_MAX 0x02
+
+#define GSI_HW_QSB_LOG_MISC_MAX 0x4
+
+/* Is IPA decompression feature enabled */
+#define IPA_HW_IS_DECOMPRESSION_ENABLED              (1)
+
+/* Whether to allow setting step mode on IPA when we crash or not */
+#define IPA_HW_IS_STEP_MODE_ALLOWED                  (true)
+
+/* Max number of virtual pipes for UL QBAP provided by HW */
+#define IPA_HW_MAX_VP_NUM                             (32)
+
+/*
+ * HW-specific clock vote frequency values in kHz
+ * (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
+ */
+enum ipa_hw_clk_freq_e {
+	/* BIMC */
+	IPA_HW_CLK_FREQ_BIMC_PEAK       = 518400,
+	IPA_HW_CLK_FREQ_BIMC_NOM_PLUS   = 404200,
+	IPA_HW_CLK_FREQ_BIMC_NOM        = 404200,
+	IPA_HW_CLK_FREQ_BIMC_SVS        = 100000,
+
+	/* PCNOC */
+	IPA_HW_CLK_FREQ_PCNOC_PEAK      = 133330,
+	IPA_HW_CLK_FREQ_PCNOC_NOM_PLUS  = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_NOM       = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_SVS       = 50000,
+
+	/* SNOC */
+	IPA_HW_CLK_FREQ_SNOC_PEAK       = 200000,
+	IPA_HW_CLK_FREQ_SNOC_NOM_PLUS   = 150000,
+	IPA_HW_CLK_FREQ_SNOC_NOM        = 150000,
+	IPA_HW_CLK_FREQ_SNOC_SVS        = 85000,
+	IPA_HW_CLK_FREQ_SNOC_SVS_2      = 50000,
+
+	/* IPA */
+	IPA_HW_CLK_FREQ_IPA_PEAK        = 600000,
+	IPA_HW_CLK_FREQ_IPA_NOM_PLUS    = 500000,
+	IPA_HW_CLK_FREQ_IPA_NOM         = 500000,
+	IPA_HW_CLK_FREQ_IPA_SVS         = 250000,
+	IPA_HW_CLK_FREQ_IPA_SVS_2       = 150000,
+
+	/* Q6 CPU */
+	IPA_HW_CLK_FREQ_Q6_PEAK         = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM_PLUS     = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM          = 729600,
+	IPA_HW_CLK_FREQ_Q6_SVS          = 729600,
+};
+
+enum ipa_hw_qtimer_gran_e {
+	IPA_HW_QTIMER_GRAN_0 = 0, /* granularity 0 is 10us */
+	IPA_HW_QTIMER_GRAN_1 = 1, /* granularity 1 is 100us */
+	IPA_HW_QTIMER_GRAN_MAX,
+};
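
The two granularities map to fixed tick periods (10us and 100us, per the comments above). A minimal sketch of the conversion follows; the helper below is hypothetical and not part of this snapshot:

static inline u32 ipa_qtimer_ticks_to_us(enum ipa_hw_qtimer_gran_e gran,
					 u32 ticks)
{
	/* gran 0 ticks are 10us wide, gran 1 ticks are 100us wide */
	return (gran == IPA_HW_QTIMER_GRAN_0) ? ticks * 10 : ticks * 100;
}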
+
+/* Pipe ID of all the IPA pipes */
+enum ipa_hw_pipe_id_e {
+	IPA_HW_PIPE_ID_0,
+	IPA_HW_PIPE_ID_1,
+	IPA_HW_PIPE_ID_2,
+	IPA_HW_PIPE_ID_3,
+	IPA_HW_PIPE_ID_4,
+	IPA_HW_PIPE_ID_5,
+	IPA_HW_PIPE_ID_6,
+	IPA_HW_PIPE_ID_7,
+	IPA_HW_PIPE_ID_8,
+	IPA_HW_PIPE_ID_9,
+	IPA_HW_PIPE_ID_10,
+	IPA_HW_PIPE_ID_11,
+	IPA_HW_PIPE_ID_12,
+	IPA_HW_PIPE_ID_13,
+	IPA_HW_PIPE_ID_14,
+	IPA_HW_PIPE_ID_15,
+	IPA_HW_PIPE_ID_16,
+	IPA_HW_PIPE_ID_17,
+	IPA_HW_PIPE_ID_18,
+	IPA_HW_PIPE_ID_19,
+	IPA_HW_PIPE_ID_20,
+	IPA_HW_PIPE_ID_21,
+	IPA_HW_PIPE_ID_22,
+	IPA_HW_PIPE_ID_23,
+	IPA_HW_PIPE_ID_24,
+	IPA_HW_PIPE_ID_25,
+	IPA_HW_PIPE_ID_26,
+	IPA_HW_PIPE_ID_27,
+	IPA_HW_PIPE_ID_28,
+	IPA_HW_PIPE_ID_29,
+	IPA_HW_PIPE_ID_30,
+	IPA_HW_PIPE_ID_MAX
+};
+
+/* Pipe IDs of the system BAM endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_id_e {
+	/* Pipes used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_ID           = IPA_HW_PIPE_ID_5,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_ID          = IPA_HW_PIPE_ID_6,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_ID       = IPA_HW_PIPE_ID_8,
+
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID   = IPA_HW_PIPE_ID_20,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_21,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_17,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_18,
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_19,
+
+	IPA_HW_Q6_UL_ACK_PRODUCER_PIPE_ID  =
+	  IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID,
+	IPA_HW_Q6_UL_DATA_PRODUCER_PIPE_ID =
+	  IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID,
+
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_ID    = IPA_HW_PIPE_ID_4,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_ID    = IPA_HW_PIPE_ID_29,
+
+	/* Test Simulator Pipes */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID     = IPA_HW_PIPE_ID_0,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_1,
+
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_3,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_10,
+
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_7,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID         = IPA_HW_PIPE_ID_9,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID     = IPA_HW_PIPE_ID_23,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_24,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_25,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_26,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_27,
+	IPA_HW_Q6_PIPE_ID_MAX                   = IPA_HW_PIPE_ID_MAX,
+};
+
+enum ipa_hw_q6_pipe_ch_id_e {
+	/* Channels used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID                = 0,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID               = 1,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_CH_ID            = 2,
+	IPA_HW_Q6_UL_ACC_PATH_ACK_PRODUCER_PIPE_CH_ID   = 6,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID                = 7,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID                = 3,
+	IPA_HW_Q6_UL_ACC_PATH_DATA_PRODUCER_PIPE_CH_ID  = 5,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_CH_ID       = 4,
+
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_CH_ID         = 8,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_CH_ID         = 9,
+	/* CH_ID 8 and 9 are Q6 SPARE CONSUMERs */
+
+	/* Test Simulator Channels */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID     = 10,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID     = 11,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID     = 12,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID     = 13,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID     = 14,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID     = 15,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID     = 16,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID     = 17,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID     = 18,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID     = 19,
+};
+
+/* System BAM endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_e {
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE = 1,
+	/* DL Pipe Q6->IPA */
+	IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
+	/* CTL Pipe Q6->IPA */
+	IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
+	/*  Q6 -> IPA,  DL NLO  */
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE = 4,
+	/* DMA ASYNC CONSUMER */
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE = 5,
+	/* DMA ASYNC PRODUCER */
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE = 6,
+	/* UL Acc Path Data Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE = 7,
+	/* UL Acc Path ACK Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE = 8,
+	/* UL Acc Path QBAP status Pipe IPA->Q6 */
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE = 9,
+	/* Diag status pipe IPA->Q6 */
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 10,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 11,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 12,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 13,
+	/* SIM B2B PROD Pipe  */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 14,
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 15,
+	/* End FEATURE_IPA_TEST_PER_SIM */
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 16,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 17,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 18,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 19,
+
+	IPA_HW_Q6_PIPE_TOTAL
+};
+
+/* System BAM endpoints between Q6 & IPA */
+enum ipa_hw_q6_gsi_ev_e { /* In Sdx24 0..11 */
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE_GSI_EV = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE_GSI_EV = 1,
+	/* DL Pipe Q6->IPA */
+	//IPA_HW_Q6_DL_CONSUMER_PIPE_GSI_EV = 2,
+	/* CTL Pipe Q6->IPA */
+	//IPA_HW_Q6_CTL_CONSUMER_PIPE_GSI_EV = 3,
+	/*  Q6 -> IPA,  LTE DL Optimized path */
+	//IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_GSI_EV = 4,
+	/* LWA DL(Wifi to Q6) */
+	//IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_GSI_EV = 5,
+	/* Diag status pipe IPA->Q6 */
+	//IPA_HW_Q6_DIAG_STATUS_PRODUCER_PIPE_GSI_EV = 6,
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_GSI_EV = 2,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_GSI_EV = 3,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_GSI_EV = 4,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_1_GSI_EV = 5,
+	IPA_HW_Q6_SIM_2_GSI_EV = 6,
+	IPA_HW_Q6_SIM_3_GSI_EV = 7,
+	IPA_HW_Q6_SIM_4_GSI_EV = 8,
+
+	IPA_HW_Q6_PIPE_GSI_EV_TOTAL
+};
+
+/*
+ * All the IRQs supported by the IPA HW. Use this enum to set the
+ * IRQ_EN register and to read the IRQ_STTS register.
+ */
+enum ipa_hw_irq_e {
+	IPA_HW_IRQ_GSI_HWP                     = (1 << 25),
+	IPA_HW_IRQ_GSI_IPA_IF_TLV_RCVD         = (1 << 24),
+	IPA_HW_IRQ_GSI_EE_IRQ                  = (1 << 23),
+	IPA_HW_IRQ_DCMP_ERR                    = (1 << 22),
+	IPA_HW_IRQ_HWP_ERR                     = (1 << 21),
+	IPA_HW_IRQ_RED_MARKER_ABOVE            = (1 << 20),
+	IPA_HW_IRQ_YELLOW_MARKER_ABOVE         = (1 << 19),
+	IPA_HW_IRQ_RED_MARKER_BELOW            = (1 << 18),
+	IPA_HW_IRQ_YELLOW_MARKER_BELOW         = (1 << 17),
+	IPA_HW_IRQ_BAM_IDLE_IRQ                = (1 << 16),
+	IPA_HW_IRQ_TX_HOLB_DROP                = (1 << 15),
+	IPA_HW_IRQ_TX_SUSPEND                  = (1 << 14),
+	IPA_HW_IRQ_PROC_ERR                    = (1 << 13),
+	IPA_HW_IRQ_STEP_MODE                   = (1 << 12),
+	IPA_HW_IRQ_TX_ERR                      = (1 << 11),
+	IPA_HW_IRQ_DEAGGR_ERR                  = (1 << 10),
+	IPA_HW_IRQ_RX_ERR                      = (1 << 9),
+	IPA_HW_IRQ_PROC_TO_HW_ACK_Q_NOT_EMPTY  = (1 << 8),
+	IPA_HW_IRQ_HWP_RX_CMD_Q_NOT_FULL       = (1 << 7),
+	IPA_HW_IRQ_HWP_IN_Q_NOT_EMPTY          = (1 << 6),
+	IPA_HW_IRQ_HWP_IRQ_3                   = (1 << 5),
+	IPA_HW_IRQ_HWP_IRQ_2                   = (1 << 4),
+	IPA_HW_IRQ_HWP_IRQ_1                   = (1 << 3),
+	IPA_HW_IRQ_HWP_IRQ_0                   = (1 << 2),
+	IPA_HW_IRQ_EOT_COAL                    = (1 << 1),
+	IPA_HW_IRQ_BAD_SNOC_ACCESS             = (1 << 0),
+	IPA_HW_IRQ_NONE                        = 0,
+	IPA_HW_IRQ_ALL                         = 0xFFFFFFFF
+};
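
Since each enumerator is a one-bit mask, values can be OR'd together for IRQ_EN and tested against an IRQ_STTS read. A minimal sketch, assuming hypothetical ipa_read_irq_stts()/ipa_write_irq_en() accessors and handler:

/* Enable two interrupts, then check whether TX_SUSPEND fired. */
u32 en = IPA_HW_IRQ_TX_SUSPEND | IPA_HW_IRQ_TX_HOLB_DROP;

ipa_write_irq_en(en);
if (ipa_read_irq_stts() & IPA_HW_IRQ_TX_SUSPEND)
	handle_tx_suspend();	/* hypothetical handler */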
+
+/*
+ * All the IRQ sources supported by the IPA HW. Use this enum to set
+ * the IRQ_SRCS register.
+ */
+enum ipa_hw_irq_srcs_e {
+	IPA_HW_IRQ_SRCS_PIPE_0  = (1 << IPA_HW_PIPE_ID_0),
+	IPA_HW_IRQ_SRCS_PIPE_1  = (1 << IPA_HW_PIPE_ID_1),
+	IPA_HW_IRQ_SRCS_PIPE_2  = (1 << IPA_HW_PIPE_ID_2),
+	IPA_HW_IRQ_SRCS_PIPE_3  = (1 << IPA_HW_PIPE_ID_3),
+	IPA_HW_IRQ_SRCS_PIPE_4  = (1 << IPA_HW_PIPE_ID_4),
+	IPA_HW_IRQ_SRCS_PIPE_5  = (1 << IPA_HW_PIPE_ID_5),
+	IPA_HW_IRQ_SRCS_PIPE_6  = (1 << IPA_HW_PIPE_ID_6),
+	IPA_HW_IRQ_SRCS_PIPE_7  = (1 << IPA_HW_PIPE_ID_7),
+	IPA_HW_IRQ_SRCS_PIPE_8  = (1 << IPA_HW_PIPE_ID_8),
+	IPA_HW_IRQ_SRCS_PIPE_9  = (1 << IPA_HW_PIPE_ID_9),
+	IPA_HW_IRQ_SRCS_PIPE_10 = (1 << IPA_HW_PIPE_ID_10),
+	IPA_HW_IRQ_SRCS_PIPE_11 = (1 << IPA_HW_PIPE_ID_11),
+	IPA_HW_IRQ_SRCS_PIPE_12 = (1 << IPA_HW_PIPE_ID_12),
+	IPA_HW_IRQ_SRCS_PIPE_13 = (1 << IPA_HW_PIPE_ID_13),
+	IPA_HW_IRQ_SRCS_PIPE_14 = (1 << IPA_HW_PIPE_ID_14),
+	IPA_HW_IRQ_SRCS_PIPE_15 = (1 << IPA_HW_PIPE_ID_15),
+	IPA_HW_IRQ_SRCS_PIPE_16 = (1 << IPA_HW_PIPE_ID_16),
+	IPA_HW_IRQ_SRCS_PIPE_17 = (1 << IPA_HW_PIPE_ID_17),
+	IPA_HW_IRQ_SRCS_PIPE_18 = (1 << IPA_HW_PIPE_ID_18),
+	IPA_HW_IRQ_SRCS_PIPE_19 = (1 << IPA_HW_PIPE_ID_19),
+	IPA_HW_IRQ_SRCS_PIPE_20 = (1 << IPA_HW_PIPE_ID_20),
+	IPA_HW_IRQ_SRCS_PIPE_21 = (1 << IPA_HW_PIPE_ID_21),
+	IPA_HW_IRQ_SRCS_PIPE_22 = (1 << IPA_HW_PIPE_ID_22),
+	IPA_HW_IRQ_SRCS_NONE    = 0,
+	IPA_HW_IRQ_SRCS_ALL     = 0xFFFFFFFF,
+};
+
+/*
+ * Total number of channel contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          20
+
+/*
+ * Total number of channel contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC          2
+
+/*
+ * Total number of event ring contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         19
+
+/*
+ * Total number of event ring contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         1
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * is not saved by default (only if ipa_cfg.gen.full_reg_trace ==
+ * true). There are no extra endpoints in Stingray.
+ */
+#define IPA_HW_REG_SAVE_NUM_ENDP_EXTRA               0
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * are always saved
+ */
+#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES             IPA_HW_PIPE_ID_MAX
+
+/*
+ * SHRAM bytes per channel
+ */
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM         12
+
+/*
+ * Total number of rx splt cmdqs; see:
+ * ipa_rx_splt_cmdq_n_cmd[IPA_RX_SPLT_CMDQ_MAX]
+ */
+#define IPA_RX_SPLT_CMDQ_MAX 4
+
+/*
+ * Although not strictly necessary for the numbers below, round_up is
+ * used so that future developers know these constants must be a
+ * multiple of four bytes: the IPA memory reads that they drive are
+ * always 32 bits wide.
+ */
+#define IPA_IU_ADDR   0x000A0000
+#define IPA_IU_SIZE   round_up(40704, sizeof(u32))
+
+#define IPA_SRAM_ADDR 0x00050000
+#define IPA_SRAM_SIZE round_up(19232, sizeof(u32))
+
+#define IPA_MBOX_ADDR 0x000C2000
+#define IPA_MBOX_SIZE round_up(256, sizeof(u32))
+
+#define IPA_HRAM_ADDR 0x00060000
+#define IPA_HRAM_SIZE round_up(47536, sizeof(u32))
+
+#define IPA_SEQ_ADDR  0x00081000
+#define IPA_SEQ_SIZE  round_up(768, sizeof(u32))
+
+#define IPA_GSI_ADDR  0x00006000
+#define IPA_GSI_SIZE  round_up(5376, sizeof(u32))
+
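As the comment above notes, round_up here documents an alignment requirement rather than changing the values: each size is already a multiple of sizeof(u32), so round_up() returns it unchanged. A minimal compile-time check under that assumption (the function itself is hypothetical):

#include <linux/build_bug.h>

static inline void ipa_reg_dump_size_check(void)
{
	/* round_up(n, 4) == (n + 3) & ~3UL; these sizes are already aligned */
	BUILD_BUG_ON(IPA_SRAM_SIZE % sizeof(u32));
	BUILD_BUG_ON(IPA_HRAM_SIZE % sizeof(u32));
}
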
+/*
+ * Macro to define a particular register cfg entry for all pipe
+ * indexed registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name)	\
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
+		(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
+		(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
+		(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
+		(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
+		(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
+		(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
+		(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
+		(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
+		(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
+		(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
+		(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
+		(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
+		(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
+		(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
+		(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 23), \
+		(u32 *)&ipa_reg_save.ipa.pipes[23].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 24), \
+		(u32 *)&ipa_reg_save.ipa.pipes[24].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 25), \
+		(u32 *)&ipa_reg_save.ipa.pipes[25].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 26), \
+		(u32 *)&ipa_reg_save.ipa.pipes[26].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 27), \
+		(u32 *)&ipa_reg_save.ipa.pipes[27].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 28), \
+		(u32 *)&ipa_reg_save.ipa.pipes[28].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 29), \
+		(u32 *)&ipa_reg_save.ipa.pipes[29].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 30), \
+		(u32 *)&ipa_reg_save.ipa.pipes[30].endp.var_name })
+
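The macro above expands to 31 { offset, pointer } initializer pairs, one per pipe, for use inside a register-save map array. An illustrative expansion site follows; the entry type, register name, and field name are assumptions for this sketch, and GEN_1xVECTOR_REG_OFST is defined elsewhere in the dump code:

struct ipa_reg_save_map_entry {
	u32 offset;	/* register offset for pipe n */
	u32 *save_ptr;	/* where the dump logic stores the readback */
};

static struct ipa_reg_save_map_entry ipa_pipe_regs_to_save[] = {
	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n, init_ctrl),
};
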
+/*
+ * Macro to define a particular register cfg entry for the remaining
+ * pipe indexed registers. In the Stingray case there are no extra
+ * endpoints, so it is intentionally empty.
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(REG_NAME, VAR_NAME)  \
+	{ 0, 0 }
+
+/*
+ * Macro to set the active flag for all active pipe indexed registers.
+ * In the Stingray case there are no extra endpoints, so it is
+ * intentionally empty.
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE()  \
+	do { \
+	} while (0)
+
+#endif /* #if !defined(_IPA_HW_COMMON_EX_H_) */

+ 10895 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h

@@ -0,0 +1,10895 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_H_)
+#define _IPA_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#define IPA_GSI_TOP_GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+				       0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_OFFS 0x00004000
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_RMSK 0xf3f
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_SHFT 0x0
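
Every HWIO register in this header follows the same accessor pattern: _IN/_INM read via in_dword_masked(), _OUT/_OUTM write via out_dword() or out_dword_masked_ns(), and _BMSK/_SHFT pairs isolate individual fields. A minimal sketch of a read-modify-write on one GSI_CFG field, using only the macros defined above (the surrounding context is hypothetical):

/* Read the current sleep-clock divider field. */
u32 clk_div = (HWIO_IPA_GSI_TOP_GSI_CFG_IN &
	       HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK) >>
	      HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT;

/* Set the divider to 2 without disturbing the other GSI_CFG fields. */
HWIO_IPA_GSI_TOP_GSI_CFG_OUTM(
	HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK,
	2 << HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT);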
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUTM(m, \
+								v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUTM(m, \
+						       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUTM(m, \
+						    v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUTM(m, \
+							   v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_MAXn 8191
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INMI(n, mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTI(n, val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
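+/*
+ * Editorial note (illustrative sketch): the _n registers are indexed
+ * arrays accessed with _INI(n)/_INMI(n, mask)/_OUTI(n, val)/
+ * _OUTMI(n, mask, val); _MAXn is the highest valid index. Dumping the
+ * GSI instruction RAM, assuming a caller-provided u32 buf[] with at
+ * least _MAXn + 1 entries:
+ *
+ *	unsigned int i;
+ *
+ *	for (i = 0; i <= HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_MAXn; i++)
+ *		buf[i] = HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(i);
+ */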
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_MAXn 1343
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00003800 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, \
+							 k) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR( \
+				n, \
+				k), \
+			HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k,	\
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k,	\
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+								 k), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
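+/*
+ * Editorial note (illustrative sketch): two-index registers take the EE
+ * index n and channel index k (_INI2/_INMI2/_OUTI2/_OUTMI2, bounded by
+ * _MAXn/_MAXk). Decoding one virtual-to-physical channel mapping entry,
+ * with ee/ch standing in for valid indices:
+ *
+ *	u32 v = HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(ee, ch);
+ *	bool valid = !!(v &
+ *		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK);
+ *	u32 phy_ch = (v &
+ *		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK) >>
+ *		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT;
+ */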
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL \
+	0x33
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL \
+	0x34
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL \
+	0x35
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL \
+	0x36
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_1_FVAL 0x3d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
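+/*
+ * Editorial note (illustrative sketch): the _FVAL constants enumerate
+ * the legal values of a field. Routing, for example, the doorbell
+ * engine onto the test bus shifts the field value into place and uses
+ * the masked read-modify-write accessor:
+ *
+ *	HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUTM(
+ *		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK,
+ *		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL
+ *		<< HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT);
+ */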
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK 0x1fff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_BMSK 0x1000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_SHFT 0xc
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_BMSK 0x800
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_SHFT 0xb
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_BMSK 0x400
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_SHFT 0xa
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0
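+/*
+ * Editorial note (illustrative sketch): GSI_DEBUG_BUSY_REG is read-only
+ * (_ATTR 0x1, no _OUT accessors) and each bit reports one engine.
+ * Testing a single engine, e.g. the MCS:
+ *
+ *	bool mcs_busy = !!(HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IN &
+ *			HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK);
+ */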
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUTM(m, \
+						   v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0
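+/*
+ * Editorial note (illustrative sketch, semantics inferred from the
+ * field name only): GSI_DEBUG_SW_STALL exposes a single MCS_STALL bit,
+ * presumably set to stall the MCS for debug and cleared to release it:
+ *
+ *	HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUT(
+ *		1 << HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT);
+ */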
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK \
+	0x1000000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0
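+/*
+ * Editorial note (illustrative sketch): the saved QSB error transaction
+ * decodes field by field from this read-only register:
+ *
+ *	u32 err = HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_IN;
+ *	bool saved = !!(err &
+ *	    HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK);
+ *	u32 mid = (err &
+ *	    HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK) >>
+ *	    HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT;
+ */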
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INMI(n,	\
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001400 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001600 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, \
+							   k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					       0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK \
+	0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK	\
+	0x2000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT	\
+	0xd
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL	\
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL	\
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL \
+	0x3
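+/*
+ * Editorial note (illustrative sketch): channel state and protocol are
+ * read out of CNTXT_0 and compared against the _FVAL encodings above.
+ * Checking whether EE ee / channel ch is started, assuming valid
+ * indices (<= _MAXn/_MAXk):
+ *
+ *	u32 ctx = HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(ee, ch);
+ *	u32 state = (ctx &
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *	bool started = (state ==
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL);
+ */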
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f00c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
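+/*
+ * Editorial note (illustrative sketch): the 64-bit ring base address is
+ * split across CNTXT_2 (LSBs) and CNTXT_3 (MSBs); the read/write
+ * pointers in CNTXT_4..7 follow the same LSB/MSB split. Reassembling
+ * the base for EE ee / channel ch:
+ *
+ *	u64 base = HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(ee, ch) |
+ *		((u64)HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(ee, ch)
+ *		 << 32);
+ */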
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f01c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f054 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, \
+							      k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+							       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, \
+							       val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, \
+								k, \
+								mask, \
+								val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
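+/*
+ * Usage sketch (not emitted by the HWIO generator; the helper name and
+ * the u32 type from <linux/types.h> are assumptions): each register
+ * above is described by an _ADDR/_PHYS/_OFFS triplet plus an _RMSK
+ * access mask, _MAXn/_MAXk array bounds, and an _ATTR byte (0x1
+ * readable, 0x2 writable). Fields are extracted with the _BMSK/_SHFT
+ * pairs, e.g.:
+ */
+static inline u32 gsi_ch_re_fetch_read_ptr(u32 ee, u32 ch)
+{
+	u32 val = HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(ee, ch);
+
+	return (val &
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK) >>
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT;
+}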
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f058 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+							       k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+								mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, \
+								val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, \
+								 k, \
+								 mask, \
+								 val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f05c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PHYS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OFFS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INMI2(n, k, \
+						 mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, \
+						  val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL \
+	0x1
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL \
+	0x2
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL \
+	0x0
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
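+/*
+ * Illustrative sketch (hypothetical helper): a field-masked
+ * read-modify-write through _OUTMI2, which expands to
+ * out_dword_masked_ns() seeded with the current _INI2 readback, so the
+ * other QOS fields are preserved.
+ */
+static inline void gsi_ch_set_wrr_weight(u32 ee, u32 ch, u32 weight)
+{
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(ee, ch,
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK,
+		weight << HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT);
+}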
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f060 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f064 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f068 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f06c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f070 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
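+/*
+ * Illustrative sketch (hypothetical helper): decoding the CHSTATE
+ * field of EV_CH_k_CNTXT_0 against its enumerated _FVAL constants.
+ */
+static inline bool gsi_ev_ch_allocated(u32 ee, u32 ev)
+{
+	u32 ctx = HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(ee, ev);
+	u32 state = (ctx &
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+
+	return state ==
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL;
+}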
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001000c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001001c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010020 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
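+/*
+ * Illustrative sketch (hypothetical helper): programming the interrupt
+ * moderation timer field of CNTXT_8 without disturbing the adjacent
+ * INT_MODC and INT_MOD_CNT fields, again via the masked _OUTMI2 form.
+ */
+static inline void gsi_ev_ch_set_int_modt(u32 ee, u32 ev, u32 modt)
+{
+	HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTMI2(ee, ev,
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK,
+		modt << HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT);
+}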
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010024 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010028 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001002c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010030 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010034 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010048 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001004c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011000 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011004 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011100 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011104 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
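+/*
+ * Illustrative sketch (hypothetical helper): the doorbell registers
+ * above carry no _RMSK/_ATTR rows, so no _INI/_OUTI accessors are
+ * generated; they are written directly with out_dword(). That the low
+ * doorbell takes the ring write-pointer LSBs is an assumption drawn
+ * from the gsi driver's usage, not from this header.
+ */
+static inline void gsi_ev_ch_ring_doorbell(u32 ee, u32 ev, u32 wp_lsb)
+{
+	out_dword(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_ADDR(ee, ev),
+		  wp_lsb);
+}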
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012000 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
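+/*
+ * Illustrative sketch (hypothetical helper): testing the single
+ * ENABLED bit of the read-only (_ATTR 0x1) per-EE GSI_STATUS register.
+ */
+static inline bool gsi_ee_enabled(u32 ee)
+{
+	return !!(HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INI(ee) &
+		  HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_BMSK);
+}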
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012008 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012010 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
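+/*
+ * Illustrative sketch (hypothetical helper): CNTXT_TYPE_IRQ is the
+ * top-level interrupt-type bitmap; a handler would typically read it
+ * once and dispatch on the per-type bits, e.g. IEOB:
+ */
+static inline bool gsi_ee_ieob_irq_pending(u32 ee)
+{
+	return !!(HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INI(ee) &
+		  HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK);
+}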
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK \
+	0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK \
+	0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
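+/*
+ * Illustrative sketch (hypothetical helper): unmasking only the
+ * channel-control and event-control interrupt types with a plain
+ * _OUTI write (the MSK register is read/write, _ATTR 0x3).
+ */
+static inline void gsi_ee_enable_ctrl_irqs(u32 ee)
+{
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(ee,
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK |
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK);
+}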
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+							     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, \
+							   mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+							    val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK	\
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR( \
+				n), \
+			mask)
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT \
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
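+/*
+ * Minimal usage sketch (ee is a hypothetical EE index): select a plain
+ * IRQ line instead of an MSI for that execution environment, touching
+ * only the INTYPE field via the masked read-modify-write accessor:
+ *
+ *	HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTMI(ee,
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_BMSK,
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL <<
+ *		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_SHFT);
+ */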
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012200 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTI(n, val) out_dword(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
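+/*
+ * ERROR_LOG_CLR, like the other *_CLR registers above, is write-only
+ * (ATTR 0x2) and therefore gets only an _OUTI accessor; writing a bit
+ * presumably clears the matching bit latched in ERROR_LOG.
+ */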
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
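+/*
+ * End of the GSI register space; the remainder of this dump covers the
+ * IPA configuration block at offset 0x00040000 from the wrapper base.
+ */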
+#define IPA_CFG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00040000)
+#define IPA_CFG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00040000)
+#define IPA_CFG_REG_BASE_OFFS 0x00040000
+#define HWIO_IPA_COMP_HW_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define HWIO_IPA_COMP_HW_VERSION_ATTR 0x1
+#define HWIO_IPA_COMP_HW_VERSION_IN in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		HWIO_IPA_COMP_HW_VERSION_RMSK)
+#define HWIO_IPA_COMP_HW_VERSION_INM(m) in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		m)
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK 0xf0000000
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT 0x1c
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_BMSK 0xfff0000
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define HWIO_IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define HWIO_IPA_COMP_HW_VERSION_STEP_SHFT 0x0
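+/*
+ * Field-decode sketch (local names are hypothetical): split the packed
+ * version word into its major/minor/step parts with the _BMSK/_SHFT
+ * pairs above:
+ *
+ *	u32 ver = HWIO_IPA_COMP_HW_VERSION_IN;
+ *	u32 major = (ver & HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK) >>
+ *		HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT;
+ *	u32 minor = (ver & HWIO_IPA_COMP_HW_VERSION_MINOR_BMSK) >>
+ *		HWIO_IPA_COMP_HW_VERSION_MINOR_SHFT;
+ *	u32 step = (ver & HWIO_IPA_COMP_HW_VERSION_STEP_BMSK) >>
+ *		HWIO_IPA_COMP_HW_VERSION_STEP_SHFT;
+ */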
+#define HWIO_IPA_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000034)
+#define HWIO_IPA_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_ENABLED_PIPES_ADDR (IPA_CFG_REG_BASE + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_COMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_RMSK 0x3fffee
+#define HWIO_IPA_COMP_CFG_ATTR 0x3
+#define HWIO_IPA_COMP_CFG_IN in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, \
+					     HWIO_IPA_COMP_CFG_RMSK)
+#define HWIO_IPA_COMP_CFG_INM(m) in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, m)
+#define HWIO_IPA_COMP_CFG_OUT(v) out_dword(HWIO_IPA_COMP_CFG_ADDR, v)
+#define HWIO_IPA_COMP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_COMP_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_COMP_CFG_IN)
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK 0x200000
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT 0x15
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK 0x1e0000
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT 0x11
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK 0x10000
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT 0x10
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK 0x8000
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT 0xf
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x4000
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 0xe
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x2000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT \
+	0xd
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK 0x1000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT 0xc
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK 0x800
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT 0xb
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK 0x400
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT 0xa
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK 0x200
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT 0x9
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK 0x100
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT 0x8
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK 0x80
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT 0x7
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK 0x40
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT 0x6
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK 0x20
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT 0x5
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK 0x8
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT 0x3
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK 0x4
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT 0x1
+#define HWIO_IPA_CLKON_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_ROUTE_ADDR (IPA_CFG_REG_BASE + 0x00000048)
+#define HWIO_IPA_ROUTE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_ROUTE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_ROUTE_RMSK 0x13fffff
+#define HWIO_IPA_ROUTE_ATTR 0x3
+#define HWIO_IPA_ROUTE_IN in_dword_masked(HWIO_IPA_ROUTE_ADDR, \
+					  HWIO_IPA_ROUTE_RMSK)
+#define HWIO_IPA_ROUTE_INM(m) in_dword_masked(HWIO_IPA_ROUTE_ADDR, m)
+#define HWIO_IPA_ROUTE_OUT(v) out_dword(HWIO_IPA_ROUTE_ADDR, v)
+#define HWIO_IPA_ROUTE_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_ROUTE_ADDR, \
+						      m, \
+						      v, \
+						      HWIO_IPA_ROUTE_IN)
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_SHFT 0x0
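+/*
+ * Masked-update sketch (pipe is a hypothetical endpoint number): change
+ * only the default routing pipe, leaving every other ROUTE field as the
+ * read-modify-write helper found it:
+ *
+ *	HWIO_IPA_ROUTE_OUTM(HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK,
+ *		(u32)pipe << HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT);
+ */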
+#define HWIO_IPA_FILTER_ADDR (IPA_CFG_REG_BASE + 0x0000004c)
+#define HWIO_IPA_FILTER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_FILTER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_MASTER_PRIORITY_ADDR (IPA_CFG_REG_BASE + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_SHARED_MEM_SIZE_ADDR (IPA_CFG_REG_BASE + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_NAT_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_TAG_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_FRAG_RULES_CLR_ADDR (IPA_CFG_REG_BASE + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_PROC_IPH_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_RMSK 0x1ff0ff7
+#define HWIO_IPA_PROC_IPH_CFG_ATTR 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IN in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		HWIO_IPA_PROC_IPH_CFG_RMSK)
+#define HWIO_IPA_PROC_IPH_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m)
+#define HWIO_IPA_PROC_IPH_CFG_OUT(v) out_dword(HWIO_IPA_PROC_IPH_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_PROC_IPH_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROC_IPH_CFG_IN)
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_BMSK 0x1000000
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_SHFT 0x18
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_BMSK \
+	0xff0000
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_SHFT 0x10
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_BMSK 0x800
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_SHFT 0xb
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_BMSK 0x400
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_SHFT 0xa
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_BMSK 0x200
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_SHFT 0x9
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_BMSK \
+	0x100
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_SHFT 0x8
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_BMSK 0x80
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_SHFT 0x7
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_BMSK 0x40
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_SHFT 0x6
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_BMSK 0x10
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_SHFT 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_BMSK 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_SHFT 0x2
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_BMSK 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_SHFT 0x0
+#define HWIO_IPA_QSB_MAX_WRITES_ADDR (IPA_CFG_REG_BASE + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_READS_ADDR (IPA_CFG_REG_BASE + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000078)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_ADDR (IPA_CFG_REG_BASE + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_ADDR (IPA_CFG_REG_BASE +	\
+						     0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_READ_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_DPL_TIMER_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_RMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_LSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_LSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_LSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_LSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_LSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_LSB_IN)
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_BMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_SHFT 0x0
+#define HWIO_IPA_DPL_TIMER_MSB_ADDR (IPA_CFG_REG_BASE + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_RMSK 0x8000ffff
+#define HWIO_IPA_DPL_TIMER_MSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_MSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_MSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_MSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_MSB_IN)
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_BMSK 0x80000000
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_SHFT 0x1f
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_BMSK 0xffff
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_SHFT 0x0
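+/*
+ * The DPL time-of-day counter apparently spans both registers: TOD_LSB
+ * holds the low 32 bits and TOD_MSB the upper 16, gated by TIMER_EN. A
+ * read sketch (assuming the pair can be sampled back to back):
+ *
+ *	u64 tod = ((u64)(HWIO_IPA_DPL_TIMER_MSB_IN &
+ *		HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_BMSK) << 32) |
+ *		HWIO_IPA_DPL_TIMER_LSB_IN;
+ */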
+#define HWIO_IPA_STATE_TX_WRAPPER_ADDR (IPA_CFG_REG_BASE + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_RMSK 0x1e01ffff
+#define HWIO_IPA_STATE_TX_WRAPPER_ATTR 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_IN in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		HWIO_IPA_STATE_TX_WRAPPER_RMSK)
+#define HWIO_IPA_STATE_TX_WRAPPER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		m)
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0x1e000000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x19
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
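+/*
+ * The IPA_STATE_* registers in this stretch (TX_WRAPPER above, TX1,
+ * FETCHER, ACL and friends below) are read-only (ATTR 0x1) snapshots of
+ * internal idle/empty/FSM signals; each _BMSK/_SHFT pair names one
+ * status bit or small state field.
+ */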
+#define HWIO_IPA_STATE_TX1_ADDR (IPA_CFG_REG_BASE + 0x00000094)
+#define HWIO_IPA_STATE_TX1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_TX1_ATTR 0x1
+#define HWIO_IPA_STATE_TX1_IN in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+					      HWIO_IPA_STATE_TX1_RMSK)
+#define HWIO_IPA_STATE_TX1_INM(m) in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_BMSK 0x7c000000
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_SHFT 0x1a
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_BMSK 0x2000000
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_SHFT 0x19
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_BMSK 0x200000
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_SHFT 0x15
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_TX1_PF_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX1_PF_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX1_AR_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX1_AR_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX1_PA_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX1_PA_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_BMSK 0x38
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_SHFT 0x3
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_BMSK 0x7
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_ADDR (IPA_CFG_REG_BASE + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_RMSK 0xfffff
+#define HWIO_IPA_STATE_FETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		HWIO_IPA_STATE_FETCHER_RMSK)
+#define HWIO_IPA_STATE_FETCHER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_BMSK \
+	0x80000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_BMSK 0x7f000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_BMSK 0xfe0
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ADDR (IPA_CFG_REG_BASE + 0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_0_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_0_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_BMSK \
+	0xff000000
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_SHFT \
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_BMSK \
+	0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_SHFT \
+	0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_SHFT 0x8
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_BMSK 0xff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ADDR (IPA_CFG_REG_BASE + 0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_1_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_BMSK	\
+	0xff000000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_SHFT	\
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_BMSK 0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_SHFT 0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_SHFT 0x8
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_BMSK \
+	0xff
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_SHFT \
+	0x0
+#define HWIO_IPA_STATE_DPL_FIFO_ADDR (IPA_CFG_REG_BASE + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_RMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_ATTR 0x1
+#define HWIO_IPA_STATE_DPL_FIFO_IN in_dword_masked( \
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		HWIO_IPA_STATE_DPL_FIFO_RMSK)
+#define HWIO_IPA_STATE_DPL_FIFO_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_BMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_ADDR (IPA_CFG_REG_BASE + 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_RMSK 0xffffffff
+#define HWIO_IPA_STATE_COAL_MASTER_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_BMSK 0xf0000000
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_SHFT 0x1c
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_BMSK 0xf000000
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_SHFT 0x18
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_BMSK 0xf00000
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_SHFT 0x14
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_BMSK 0xf0000
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_BMSK 0xf00
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_SHFT 0x8
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_BMSK 0xf0
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_SHFT 0x4
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_BMSK 0xf
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_SHFT 0x0
+#define HWIO_IPA_STATE_DFETCHER_ADDR (IPA_CFG_REG_BASE + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_RMSK 0x3f3f3
+#define HWIO_IPA_STATE_DFETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_DFETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		HWIO_IPA_STATE_DFETCHER_RMSK)
+#define HWIO_IPA_STATE_DFETCHER_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_BMSK 0x3f000
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_BMSK 0x3f0
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_ACL_ADDR (IPA_CFG_REG_BASE + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_RMSK 0xffcffff
+#define HWIO_IPA_STATE_ACL_ATTR 0x1
+#define HWIO_IPA_STATE_ACL_IN in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+					      HWIO_IPA_STATE_ACL_RMSK)
+#define HWIO_IPA_STATE_ACL_INM(m) in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_BMSK 0x8000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_SHFT 0x1b
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_BMSK 0x4000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_SHFT 0x1a
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_BMSK 0x2000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_SHFT 0x19
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_BMSK 0x200000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_SHFT 0x15
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_BMSK 0x80000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_SHFT 0x13
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_BMSK 0x8000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_SHFT 0xf
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_BMSK 0x2000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_SHFT 0xd
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_BMSK 0x1000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_SHFT 0xc
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_BMSK 0x800
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_SHFT 0xb
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_BMSK 0x400
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_SHFT 0xa
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_BMSK 0x200
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_SHFT 0x9
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_BMSK 0x100
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_SHFT 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_BMSK 0x80
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_SHFT 0x7
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_BMSK 0x20
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_SHFT 0x5
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_BMSK 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_SHFT 0x3
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_BMSK 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_SHFT 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_BMSK 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_TLV_ADDR (IPA_CFG_REG_BASE + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		HWIO_IPA_STATE_GSI_TLV_RMSK)
+#define HWIO_IPA_STATE_GSI_TLV_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_AOS_ADDR (IPA_CFG_REG_BASE + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		HWIO_IPA_STATE_GSI_AOS_RMSK)
+#define HWIO_IPA_STATE_GSI_AOS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_ADDR (IPA_CFG_REG_BASE + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_RMSK 0xff
+#define HWIO_IPA_STATE_GSI_IF_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_BMSK 0xf0
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_SHFT 0x4
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_BMSK 0xf
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_SKIP_ADDR (IPA_CFG_REG_BASE + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_RMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_SKIP_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		HWIO_IPA_STATE_GSI_SKIP_RMSK)
+#define HWIO_IPA_STATE_GSI_SKIP_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_BMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_CONS_ADDR (IPA_CFG_REG_BASE + 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_RMSK 0x7ffffff
+#define HWIO_IPA_STATE_GSI_IF_CONS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_CONS_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_CONS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_BMSK \
+	0x7fe0000
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_SHFT \
+	0x11
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_BMSK \
+	0x1ff80
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_SHFT 0x7
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_BMSK	\
+	0x7e
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_SHFT	\
+	0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_ADDR (IPA_CFG_REG_BASE + 0x000000a8)
+#define HWIO_IPA_STATE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_STATE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_STATE_RMSK 0xf7ffffff
+#define HWIO_IPA_STATE_ATTR 0x1
+#define HWIO_IPA_STATE_IN in_dword_masked(HWIO_IPA_STATE_ADDR, \
+					  HWIO_IPA_STATE_RMSK)
+#define HWIO_IPA_STATE_INM(m) in_dword_masked(HWIO_IPA_STATE_ADDR, m)
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_BMSK 0x40000000
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_SHFT 0x1e
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_BMSK 0x20000000
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_SHFT 0x1d
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_BMSK 0x10000000
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_SHFT 0x1c
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_BMSK 0x7800000
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_SHFT 0x17
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_BMSK 0x400000
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_SHFT 0x16
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_BMSK 0x2000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_SHFT 0xd
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_BMSK 0x800
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_SHFT 0xb
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_AGGR_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_AGGR_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_TX_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_RX_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_RX_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_RX_WAIT_BMSK 0x1
+#define HWIO_IPA_STATE_RX_WAIT_SHFT 0x0
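+/*
+ * Hypothetical polling sketch (the real drain/suspend sequences live in
+ * the driver sources, not in this generated header): spin until the
+ * aggregate idle bit is reported:
+ *
+ *	while (!(HWIO_IPA_STATE_IN & HWIO_IPA_STATE_IPA_FULL_IDLE_BMSK))
+ *		udelay(5);
+ */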
+#define HWIO_IPA_STATE_RX_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_RMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_RX_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_RX_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_RX_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_BMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_STATE_TX0_ADDR (IPA_CFG_REG_BASE + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_RMSK 0xfffffff
+#define HWIO_IPA_STATE_TX0_ATTR 0x1
+#define HWIO_IPA_STATE_TX0_IN in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+					      HWIO_IPA_STATE_TX0_RMSK)
+#define HWIO_IPA_STATE_TX0_INM(m) in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_BMSK 0xf800000
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_SHFT 0x17
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_BMSK 0x100000
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_SHFT 0x14
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_BMSK 0x80000
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_BMSK 0x20000
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_SHFT 0x11
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_BMSK 0x10000
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_SHFT 0x10
+#define HWIO_IPA_STATE_TX0_PF_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX0_PF_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX0_AR_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX0_AR_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_BMSK 0x200
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_SHFT 0x9
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX0_PA_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_TX0_PA_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_BMSK 0xc
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_SHFT 0x2
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_BMSK 0x3
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_RMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_AGGR_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_AGGR_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_ADDR (IPA_CFG_REG_BASE + \
+						    0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS \
+						    + 0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS \
+						    + 0x000000d8)
+#define HWIO_IPA_STATE_NLO_AGGR_ADDR (IPA_CFG_REG_BASE + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_RMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_ATTR 0x1
+#define HWIO_IPA_STATE_NLO_AGGR_IN in_dword_masked( \
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		HWIO_IPA_STATE_NLO_AGGR_RMSK)
+#define HWIO_IPA_STATE_NLO_AGGR_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		m)
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_BMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_1_ADDR (IPA_CFG_REG_BASE + 0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_RMSK 0x3fffffff
+#define HWIO_IPA_STATE_COAL_MASTER_1_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_1_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_BMSK 0x3c000000
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_SHFT 0x1a
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_BMSK 0x3c00000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_SHFT 0x16
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_BMSK 0x3f0000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_BMSK 0xfc0
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_SHFT 0x6
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_BMSK 0x3f
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_SHFT 0x0
+#define HWIO_IPA_YELLOW_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000118)
+#define HWIO_IPA_RED_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + 0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000124)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000128)
+#define HWIO_IPA_RED_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x0000012c)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000138)
+#define HWIO_IPA_RED_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + 0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000144)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ADDR (IPA_CFG_REG_BASE + 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ATTR 0x3
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IN in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_RMSK)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUT(v) out_dword( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_IN)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_SHFT 0x0
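+/*
+ * Read-modify-write sketch, assuming out_dword_masked_ns() preserves
+ * the unmasked bits of the current value passed as its last argument:
+ * _OUTM(m, v) updates only the bits in m. For example, enabling IPv4
+ * filter hashing without disturbing the other enable bits:
+ *
+ *	HWIO_IPA_FILT_ROUT_HASH_EN_OUTM(
+ *		HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_BMSK,
+ *		1 << HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_SHFT);
+ */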
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR (IPA_CFG_REG_BASE + 0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ATTR 0x2
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OUT(v) out_dword(	\
+		HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_SHFT 0x0
+#define HWIO_IPA_FILT_ROUT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000150)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_ADDR (IPA_CFG_REG_BASE + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b8)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001c8)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001cc)
+#define HWIO_IPA_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001d8)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR (IPA_CFG_REG_BASE + 0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ATTR 0x1
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_IN in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_INM(m) in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		m)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0xfffffff8
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
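+/*
+ * Note: the ADDR field starts at bit 3 while the ZERO field pins bits
+ * 2:0, so the packet-processing context base programmed here is
+ * presumably required to be 8-byte aligned; the MSB register below
+ * carries the upper 32 bits of the 64-bit address.
+ */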
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR (IPA_CFG_REG_BASE + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_SHFT 0x0
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK 0x3ffff
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0x3fff8
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
+#define HWIO_IPA_AGGR_FORCE_CLOSE_ADDR (IPA_CFG_REG_BASE + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001ec)
+#define HWIO_IPA_SCND_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001f4)
+#define HWIO_IPA_TX_CFG_ADDR (IPA_CFG_REG_BASE + 0x000001fc)
+#define HWIO_IPA_TX_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001fc)
+#define HWIO_IPA_TX_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001fc)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000208)
+#define HWIO_IPA_RAM_INTLV_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_FLAVOR_0_ADDR (IPA_CFG_REG_BASE + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000210)
+#define HWIO_IPA_FLAVOR_1_ADDR (IPA_CFG_REG_BASE + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_FLAVOR_2_ADDR (IPA_CFG_REG_BASE + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000218)
+#define HWIO_IPA_FLAVOR_3_ADDR (IPA_CFG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_4_ADDR (IPA_CFG_REG_BASE + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000220)
+#define HWIO_IPA_FLAVOR_5_ADDR (IPA_CFG_REG_BASE + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000224)
+#define HWIO_IPA_FLAVOR_6_ADDR (IPA_CFG_REG_BASE + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000228)
+#define HWIO_IPA_FLAVOR_7_ADDR (IPA_CFG_REG_BASE + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000022c)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + \
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000238)
+#define HWIO_IPA_IDLE_INDICATION_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000240)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x0000024c)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000254)
+#define HWIO_IPA_QTIME_SMP_ADDR (IPA_CFG_REG_BASE + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_QTIME_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_QTIME_MSB_ADDR (IPA_CFG_REG_BASE + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+						       0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000033c)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000344)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_ADDR (IPA_CFG_REG_BASE + \
+							0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000348)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000400 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
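+/*
+ * Indexed-register sketch: the _n macros take an instance index
+ * (stride 0x20 here), valid for n = 0.._MAXn, and the accessors gain
+ * an I suffix (_INI/_INMI/_OUTI/_OUTMI). Assuming the same accessor
+ * primitives as above, reading the group-0 minimum limit for resource
+ * type 2 would be:
+ *
+ *	u32 v = HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(2);
+ *	u32 min0 = (v &
+ *	    HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_BMSK)
+ *	    >> HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_SHFT;
+ */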
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000404 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000408 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_RMSK 0x3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_SRC_RSRC_GRP_4_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_SRC_RSRC_GRP_4_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_SRC_RSRC_GRP_4_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_SRC_RSRC_GRP_4_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_RMSK 0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_4_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_4_CNT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000500 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000504 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000508 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_RMSK 0x3f3f
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_DST_RSRC_GRP_4_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_DST_RSRC_GRP_4_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_DST_RSRC_GRP_4_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_DST_RSRC_GRP_4_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_RMSK 0xff
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_DST_RSRC_GRP_4_CNT_BMSK \
+	0xff
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_DST_RSRC_GRP_4_CNT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_RSRC_GRP_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_RMSK 0x3f11f171
+#define HWIO_IPA_RSRC_GRP_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_GRP_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		HWIO_IPA_RSRC_GRP_CFG_RMSK)
+#define HWIO_IPA_RSRC_GRP_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_GRP_CFG_OUT(v) out_dword(HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_RSRC_GRP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RSRC_GRP_CFG_IN)
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_BMSK 0x3f000000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_SHFT 0x18
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_BMSK 0x100000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_SHFT 0x14
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_BMSK 0x1f000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_SHFT 0xc
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_BMSK 0x100
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_SHFT 0x8
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_BMSK 0x70
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_SHFT 0x0
+#define HWIO_IPA_PIPELINE_DISABLE_ADDR (IPA_CFG_REG_BASE + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_RMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_ATTR 0x3
+#define HWIO_IPA_PIPELINE_DISABLE_IN in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		HWIO_IPA_PIPELINE_DISABLE_RMSK)
+#define HWIO_IPA_PIPELINE_DISABLE_INM(m) in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m)
+#define HWIO_IPA_PIPELINE_DISABLE_OUT(v) out_dword( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		v)
+#define HWIO_IPA_PIPELINE_DISABLE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_PIPELINE_DISABLE_IN)
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_BMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_SHFT 0x3
+#define HWIO_IPA_AXI_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005ac)
+#define HWIO_IPA_STAT_QUOTA_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000800 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CTRL_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
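+
+/*
+ * Editor's sketch (not part of the original header): every HWIO register
+ * in this file follows the same access pattern -- _INI(n)/_IN read the
+ * register masked by its _RMSK, _OUTI/_OUT write it whole, and
+ * _OUTMI/_OUTM perform a read-modify-write confined to the given mask.
+ * The helper below is hypothetical; u32 and the in_dword_masked() /
+ * out_dword*() primitives are assumed from the surrounding kernel/HWIO
+ * environment.
+ */
+static inline void ipa_sketch_endp_suspend(u32 n, u32 suspend)
+{
+	/* suspend is 0 or 1; only the ENDP_SUSPEND bit is touched,
+	 * ENDP_DELAY is preserved by the masked write.
+	 */
+	HWIO_IPA_ENDP_INIT_CTRL_n_OUTMI(n,
+		HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK,
+		suspend << HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT);
+}
+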
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTMI(n, mask, \
+					     val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000808 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_RMSK 0x17f
+#define HWIO_IPA_ENDP_INIT_CFG_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CFG_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CFG_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
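+
+/*
+ * Editor's sketch (hypothetical helper, not from the source tree): a field
+ * value is composed by shifting with its _SHFT and clamping with its
+ * _BMSK. Here the two-bit CS_OFFLOAD_EN field of IPA_ENDP_INIT_CFG_n is
+ * updated in isolation via the masked-write macro defined above.
+ */
+static inline void ipa_sketch_set_cs_offload(u32 n, u32 mode)
+{
+	u32 val = (mode << HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT) &
+		  HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK;
+
+	HWIO_IPA_ENDP_INIT_CFG_n_OUTMI(n,
+		HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK, val);
+}
+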
+#define HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000080c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_NAT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_NAT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_NAT_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_NAT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000810 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK 0xc0000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT 0x1e
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK 0x30000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
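+
+/*
+ * Editor's sketch (hypothetical): HDR_LEN is a split field -- value bits
+ * [5:0] live in HDR_LEN at bit 0 and value bits [7:6] in HDR_LEN_MSB at
+ * bit 28 -- so an 8-bit header length must be encoded across both masks
+ * before a single register write.
+ */
+static inline u32 ipa_sketch_encode_hdr_len(u32 hdr_len)
+{
+	u32 lsb = (hdr_len << HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT) &
+		  HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK;
+	u32 msb = ((hdr_len >> 6) <<
+		   HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT) &
+		  HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK;
+
+	return lsb | msb;
+}
+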
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK 0x3f3fff
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK \
+	0x300000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT \
+	0x14
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK 0xc0000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT 0x12
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK \
+	0x30000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT \
+	0x10
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK \
+	0x3f0
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+							0x00000818 + \
+							0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INMI(n, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTMI(n, mask, \
+						     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK \
+	0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000820 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_RMSK 0x3ffff1ff
+#define HWIO_IPA_ENDP_INIT_MODE_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_MODE_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_MODE_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_MODE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_MODE_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_BMSK 0x10000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000824 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_RMSK 0xdfff7ff
+#define HWIO_IPA_ENDP_INIT_AGGR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_AGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_AGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x18
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x800000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x17
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x7e0000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0x11
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x1f000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x7e0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
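+
+/*
+ * Editor's sketch (hypothetical): decoding a field from a masked read.
+ * _INI(n) returns the register contents already masked by _RMSK; the
+ * individual field is then isolated with its _BMSK/_SHFT pair.
+ */
+static inline u32 ipa_sketch_aggr_pkt_limit(u32 n)
+{
+	u32 val = HWIO_IPA_ENDP_INIT_AGGR_n_INI(n);
+
+	return (val & HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+	       HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+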
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000830 + 0x70 * \
+						      (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK 0x11f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK 0x1f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT 0x0
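+
+/*
+ * Editor's sketch (hypothetical): programming the head-of-line blocking
+ * timer sets two fields of one register -- the 5-bit TIME_LIMIT and the
+ * GRAN_SEL granularity bit -- in a single full-register store.
+ */
+static inline void ipa_sketch_set_hol_timer(u32 n, u32 ticks, u32 gran)
+{
+	u32 val =
+		((ticks <<
+		  HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT) &
+		 HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK) |
+		((gran <<
+		  HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT) &
+		 HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK);
+
+	HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTI(n, val);
+}
+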
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK 0xffff7fff
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xffff0000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_BMSK 0x4000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_SHFT 0xe
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3f00
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTMI(n, mask, \
+					    val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000083c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_RMSK 0xffff
+#define HWIO_IPA_ENDP_INIT_SEQ_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_SEQ_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_SEQ_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+#define HWIO_IPA_ENDP_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000840 +	\
+					0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_RMSK 0x23f
+#define HWIO_IPA_ENDP_STATUS_n_MAXn 30
+#define HWIO_IPA_ENDP_STATUS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		HWIO_IPA_ENDP_STATUS_n_RMSK)
+#define HWIO_IPA_ENDP_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_STATUS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_STATUS_n_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_STATUS_n_INI(n))
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
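+
+/*
+ * Editor's sketch (hypothetical): enabling packet status on pipe n and
+ * steering the status packets to pipe status_endp combines the STATUS_EN
+ * bit with the 5-bit STATUS_ENDP index of IPA_ENDP_STATUS_n.
+ */
+static inline void ipa_sketch_enable_status(u32 n, u32 status_endp)
+{
+	u32 val = HWIO_IPA_ENDP_STATUS_n_STATUS_EN_BMSK |
+		  ((status_endp << HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT) &
+		   HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+	HWIO_IPA_ENDP_STATUS_n_OUTI(n, val);
+}
+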
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x0000085c + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK 0x7f007f
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_MAXn 31
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n))
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK \
+	0x400000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT \
+	0x16
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK \
+	0x200000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT \
+	0x15
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK \
+	0x100000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT \
+	0x14
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK \
+	0x80000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT \
+	0x13
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x40000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x12
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x20000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x11
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK \
+	0x10000
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK \
+	0x40
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT \
+	0x6
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK \
+	0x20
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT \
+	0x5
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT \
+	0x3
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT \
+	0x0
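+
+/*
+ * Editor's sketch (hypothetical): the filter/router hash masks above are
+ * single bits selecting which header fields enter the hash tuple. A
+ * masked write can flip a subset -- here the filter-side src/dst IP
+ * address bits -- without disturbing the router-side half of the register.
+ */
+static inline void ipa_sketch_hash_on_ip_pair(u32 n)
+{
+	u32 mask =
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_BMSK |
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_BMSK;
+
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTMI(n, mask, mask);
+}
+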
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x00000860 + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_NLO_PP_CFG1_ADDR (IPA_CFG_REG_BASE + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_RMSK 0x3fffffff
+#define HWIO_IPA_NLO_PP_CFG1_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG1_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG1_ADDR, \
+						HWIO_IPA_NLO_PP_CFG1_RMSK)
+#define HWIO_IPA_NLO_PP_CFG1_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG1_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG1_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG1_IN)
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_BMSK 0x3f000000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_SHFT 0x18
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_BMSK 0xff0000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_SHFT 0x0
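+
+/*
+ * Editor's sketch (hypothetical): non-endpoint registers drop the (n)
+ * parameter and use _IN/_OUT directly. Composing IPA_NLO_PP_CFG1 packs
+ * the four pp/vp fields defined above into one 32-bit value.
+ */
+static inline void ipa_sketch_nlo_pp_cfg1(u32 ack_pp, u32 data_pp,
+					  u32 status_pp, u32 ack_max_vp)
+{
+	HWIO_IPA_NLO_PP_CFG1_OUT(
+		((ack_max_vp << HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_SHFT) &
+		 HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_BMSK) |
+		((status_pp << HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_SHFT) &
+		 HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_BMSK) |
+		((data_pp << HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_SHFT) &
+		 HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_BMSK) |
+		((ack_pp << HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_SHFT) &
+		 HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_BMSK));
+}
+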
+#define HWIO_IPA_NLO_PP_CFG2_ADDR (IPA_CFG_REG_BASE + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_RMSK 0x7ffff
+#define HWIO_IPA_NLO_PP_CFG2_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG2_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG2_ADDR, \
+						HWIO_IPA_NLO_PP_CFG2_RMSK)
+#define HWIO_IPA_NLO_PP_CFG2_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG2_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG2_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG2_IN)
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_BMSK 0x40000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_SHFT 0x12
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_BMSK 0x20000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_SHFT 0x11
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_BMSK 0x10000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_SHFT 0x0
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUT(v) out_dword(	\
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_MIN_DSM_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_RMSK)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_IN)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_BMSK 0xffff0000
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_SHFT 0x10
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_BMSK 0xffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_SNIFFER_QMB_SEL_ADDR (IPA_CFG_REG_BASE + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001800)
+#define HWIO_IPA_COAL_EVICT_LRU_ADDR (IPA_CFG_REG_BASE + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000180c)
+#define HWIO_IPA_COAL_QMAP_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001810)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR (IPA_CFG_REG_BASE + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK 0x80ff00ff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ATTR 0x3
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUT(v) out_dword( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		v)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_IN)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_BMSK 0x80000000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_SHFT 0x1f
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_BMSK 0xff0000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_SHFT 0x10
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_BMSK 0xff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR (IPA_CFG_REG_BASE + 0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR (IPA_CFG_REG_BASE + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_BMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_SHFT 0x0
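+
+/*
+ * Editor's sketch (hypothetical): the flush handshake implied by the two
+ * registers above -- write a request with the REQ bit plus the vp/pp
+ * indices, then poll the read-only (_ATTR 0x1) ack register until the
+ * hardware reports completion. A real driver would bound this loop.
+ */
+static inline void ipa_sketch_nlo_vp_flush(u32 pp_indx, u32 vp_indx)
+{
+	HWIO_IPA_NLO_VP_FLUSH_REQ_OUT(
+		HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_BMSK |
+		((vp_indx << HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_SHFT) &
+		 HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_BMSK) |
+		((pp_indx << HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_SHFT) &
+		 HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_BMSK));
+
+	while (!(HWIO_IPA_NLO_VP_FLUSH_ACK_IN &
+		 HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_BMSK))
+		;
+}
+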
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_DSM_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		HWIO_IPA_NLO_VP_DSM_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_SHFT 0x0
+#define IPA_DEBUG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00042000)
+#define IPA_DEBUG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00042000)
+#define IPA_DEBUG_REG_BASE_OFFS 0x00042000
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000010)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000024)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000074)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000088)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000009c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000110)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK 0x3f77
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUT(v) out_dword( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		v)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_IN)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_BMSK 0x3f00
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_SHFT 0x8
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_BMSK 0x70
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_BMSK 0x7
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK 0x3f3
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK 0x7f7f3f3
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_BMSK 0x7f00000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_SHFT 0x14
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_BMSK 0x7f000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_SHFT 0xc
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_ADDR (IPA_DEBUG_REG_BASE + 0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000134)
+#define HWIO_IPA_BRESP_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000013c)
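+/*
+ * Debug data tap: IPA_TESTBUS_SEL routes an internal or external block
+ * (and, where applicable, a pipe) onto the test bus, whose output is
+ * sampled through the read-only IPA_DEBUG_DATA register.
+ */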
+#define HWIO_IPA_DEBUG_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_RMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_ATTR 0x1
+#define HWIO_IPA_DEBUG_DATA_IN in_dword_masked(HWIO_IPA_DEBUG_DATA_ADDR, \
+					       HWIO_IPA_DEBUG_DATA_RMSK)
+#define HWIO_IPA_DEBUG_DATA_INM(m) in_dword_masked( \
+		HWIO_IPA_DEBUG_DATA_ADDR, \
+		m)
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_BMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_SHFT 0x0
+#define HWIO_IPA_TESTBUS_SEL_ADDR (IPA_DEBUG_REG_BASE + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_RMSK 0x1fffff1
+#define HWIO_IPA_TESTBUS_SEL_ATTR 0x3
+#define HWIO_IPA_TESTBUS_SEL_IN in_dword_masked(HWIO_IPA_TESTBUS_SEL_ADDR, \
+						HWIO_IPA_TESTBUS_SEL_RMSK)
+#define HWIO_IPA_TESTBUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m)
+#define HWIO_IPA_TESTBUS_SEL_OUT(v) out_dword(HWIO_IPA_TESTBUS_SEL_ADDR, v)
+#define HWIO_IPA_TESTBUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_TESTBUS_SEL_IN)
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_BMSK 0x1f00000
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_SHFT 0x14
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_BMSK 0xff000
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_SHFT 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_BMSK 0xff0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_FVAL 0x0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX0_FVAL 0x1
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FRAG_FVAL 0x2
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_UCP_FVAL 0x3
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ENQUEUER_FVAL 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ROUTER_FVAL 0x5
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_PKT_PARSER_FVAL 0x6
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FILTER_NAT_FVAL 0x7
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_HDRI_RSRCREL_FVAL \
+	0x8
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_AHB2AHB_FVAL 0x9
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_FVAL 0xa
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DCMP_FVAL 0xb
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DISPATCHER_FVAL 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_D_DCPH_FVAL 0xd
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_GSI_TEST_BUS_FVAL 0xe
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DEADBEAF_FVAL 0xf
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MISC_FVAL 0x10
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_STTS_SNIFFER_FVAL \
+	0x11
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_0_FVAL 0x12
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_1_FVAL 0x13
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_ACKQ_FVAL 0x14
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_ACKQ_FVAL 0x15
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX1_FVAL 0x16
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_H_DCPH_FVAL 0x17
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RX_HPS_CMDQ_FVAL 0x18
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_DPS_CMDQ_FVAL 0x19
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_TX_CMDQ_FVAL 0x1a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CMDQ_L_FVAL 0x1b
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_LEGACY_CMDQ_INT_FVAL \
+	0x1c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CTX_HANDLER_FVAL	\
+	0x1d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_FVAL 0x1e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACK_MNGR_CMDQ_FVAL 0x1f
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ENDP_INIT_CTRL_SUSPEND_FVAL \
+	0x20
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACL_WRAPPER_FVAL 0x22
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX_WRAPPER_FVAL \
+	0x23
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_AHB2AHB_BRIDGE_FVAL \
+	0x24
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_TYPE_FVAL 0x31
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_FVAL 0x32
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACKMNGR_FVAL 0x33
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_SEQ_FVAL 0x34
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_SEQ_FVAL 0x35
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FTCH_FVAL 0x36
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_FTCH_FVAL 0x37
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_D_DCPH_2_FVAL 0x38
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_NTF_TX_CMDQ_FVAL 0x39
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACK_MNGR_CMDQ_FVAL \
+	0x3a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACKMNGR_FVAL 0x3b
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_AHB2AHB_FVAL	\
+	0x3c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_PCIE_FVAL \
+	0x3d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QSB2AXI_FVAL 0x3e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_FVAL 0x3f
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK 0x1
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_SHFT 0x0
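+/*
+ * Hypothetical usage sketch, assuming the in_dword_masked()/out_dword()
+ * helpers used by these macros are in scope:
+ *
+ *   // Route the GSI test bus onto the debug tap and enable it.
+ *   HWIO_IPA_TESTBUS_SEL_OUT(
+ *	(HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_GSI_TEST_BUS_FVAL <<
+ *	 HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT) |
+ *	HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK);
+ *   val = HWIO_IPA_DEBUG_DATA_IN;	// sample the selected signals
+ */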
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ATTR 0x3
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUT(v) out_dword( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		v)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_IN)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_ATTR 0x1
+#define HWIO_IPA_STEP_MODE_STATUS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		HWIO_IPA_STEP_MODE_STATUS_RMSK)
+#define HWIO_IPA_STEP_MODE_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_GO_ADDR (IPA_DEBUG_REG_BASE + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_HW_EVENTS_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000218)
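+/*
+ * Hardware logging: IPA_LOG configures the capture (pipe, length,
+ * reduction, DPL L2 removal, enable), while the LOG_BUF_HW_CMD_*
+ * registers below describe the command log buffer: 64-bit start
+ * address (ADDR plus ADDR_MSB), hardware write pointer, size/enable/
+ * skip-DDR configuration and the internal RAM read/write pointers.
+ */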
+#define HWIO_IPA_LOG_ADDR (IPA_DEBUG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_LOG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_LOG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_LOG_RMSK 0x3ff1f2
+#define HWIO_IPA_LOG_ATTR 0x3
+#define HWIO_IPA_LOG_IN in_dword_masked(HWIO_IPA_LOG_ADDR, \
+					HWIO_IPA_LOG_RMSK)
+#define HWIO_IPA_LOG_INM(m) in_dword_masked(HWIO_IPA_LOG_ADDR, m)
+#define HWIO_IPA_LOG_OUT(v) out_dword(HWIO_IPA_LOG_ADDR, v)
+#define HWIO_IPA_LOG_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_LOG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_IPA_LOG_IN)
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_BMSK 0x200000
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_SHFT 0x15
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_BMSK 0x100000
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_SHFT 0x14
+#define HWIO_IPA_LOG_LOG_LENGTH_BMSK 0xff000
+#define HWIO_IPA_LOG_LOG_LENGTH_SHFT 0xc
+#define HWIO_IPA_LOG_LOG_PIPE_BMSK 0x1f0
+#define HWIO_IPA_LOG_LOG_PIPE_SHFT 0x4
+#define HWIO_IPA_LOG_LOG_EN_BMSK 0x2
+#define HWIO_IPA_LOG_LOG_EN_SHFT 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK 0x3ffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_BMSK 0x20000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_SHFT 0x11
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK 0xbfff3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_BMSK \
+	0x80000000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_SHFT 0x1f
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_BMSK 0x3fff0000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_BMSK 0x3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_SHFT 0x0
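+/*
+ * Step-mode address and breakpoint windows for the fetcher (HFETCHER)
+ * and for what are presumably the HPS/DPS sequencers (HSEQ/DSEQ);
+ * these are address-only definitions with no generated masks or
+ * accessors.
+ */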
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000254)
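+/*
+ * ACK-queue debug windows for the RX and microcontroller (uC) ACK
+ * queues: a command/config pair, one write and one read data word, and
+ * a status register each.
+ */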
+#define HWIO_IPA_RX_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_UC_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000027c)
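+/*
+ * The RX splitter command-queue registers below are generated as
+ * four-entry arrays (suffix _n, MAXn = 3) with a 0x2C byte stride;
+ * the indexed _INI/_INMI/_OUTI/_OUTMI accessors take the instance
+ * number in place of the scalar _IN/_INM/_OUT/_OUTM forms.
+ */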
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ATTR 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_BMSK 0x40
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_SHFT 0x6
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_BMSK 0x20
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_BMSK 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_SHFT 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_BMSK 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000288 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000028c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000290 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000294 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000298 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000029c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a0 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a4 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_BMSK 0x60
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_BMSK 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_SHFT 0x0
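+/*
+ * Hypothetical sketch of polling the four RX splitter command queues,
+ * assuming the in_dword_masked() helper used by these macros is in
+ * scope:
+ *
+ *   for (n = 0; n <= HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_MAXn; n++) {
+ *	u32 s = HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INI(n);
+ *	if (!(s & HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_BMSK))
+ *		pr_debug("splt cmdq %d not empty\n", n);
+ *   }
+ */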
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK 0x7
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_SHFT 0x0
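+/*
+ * RX-to-HPS command queue debug interface: CMD selects a client and
+ * pushes/pops entries, CFG_WR/CFG_RD block individual clients, and the
+ * DATA_RD_0..3 words expose the queued descriptor (lengths, metadata,
+ * opcode, flags, ordering, source pipe and 64-bit address).
+ */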
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK 0x3f
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_BMSK 0x20
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_SHFT 0x5
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x1c
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_RX_HPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
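+/*
+ * Per-client RX-HPS queue depth tuning: 4-bit MIN/MAX depth fields for
+ * clients 0-4 (clients 0-2 byte-aligned, clients 3 and 4 packed into
+ * the top byte, per the 0xff0f0f0f mask).
+ */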
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_SHFT 0x0
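+/*
+ * HPS-to-DPS command queue: same CMD/RELEASE/CFG/DATA/STATUS layout as
+ * the RX-HPS queue above, but with a 5-bit client field and a packed
+ * 20-bit read descriptor (rep flag, opcode, source pipe/ID, context
+ * ID).
+ */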
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_IN)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0xfc
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK 0xbf
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x3c
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_DPS_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
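+/*
+ * Convention note: the _ATTR value appears to encode access rights --
+ * registers with _ATTR 0x1 are generated with read accessors only
+ * (_IN/_INM), while _ATTR 0x3 registers also get the write accessors
+ * (_OUT/_OUTM). This is inferred from the macros in this file, not
+ * from a hardware spec.
+ */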
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_BMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_SHFT 0x0
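+/*
+ * Parameterized registers such as the _CLI_n group above form small
+ * arrays: _ADDR(n) computes the instance address at a 0x10 stride and
+ * _MAXn is the highest valid index. A minimal sketch, assuming the
+ * common in_dword_masked() helper, that reads every instance:
+ *
+ *	u32 val;
+ *	int n;
+ *
+ *	for (n = 0;
+ *	     n <= HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_MAXn;
+ *	     n++)
+ *		val = HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n);
+ */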
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK 0x7ffffff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK 0x4000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK 0x3f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ATTR 0x3
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_BMSK 0x20
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_SHFT 0x5
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_BMSK	\
+	0x1f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
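+/*
+ * The FIFO status word above packs write/read pointers alongside
+ * empty/full flags. A hedged decode sketch (the modulo-256 occupancy
+ * arithmetic is an assumption based on the 8-bit pointer fields, not a
+ * documented guarantee):
+ *
+ *	u32 s = HWIO_IPA_GSI_TLV_FIFO_STATUS_IN;
+ *	u32 wr = (s & HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_BMSK) >>
+ *		 HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_SHFT;
+ *	u32 rd = (s & HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_BMSK) >>
+ *		 HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_SHFT;
+ *	u32 used = (wr - rd) & 0xff;  (assumption: pointers wrap at 256)
+ */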
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000548)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_SHFT 0x0
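+/*
+ * The LOG_BUF_SW_COMP_VAL_0..7 registers earlier in this block and the
+ * LOG_BUF_SW_MASK_VAL_0..7 registers above come in matched pairs;
+ * judging purely by the names, each _MASK_VAL presumably selects which
+ * bits of the corresponding _COMP_VAL take part in the software
+ * log-buffer compare. Treat this as an inference from the register
+ * names rather than documented behavior.
+ */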
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000540)
+#define HWIO_IPA_RAM_HW_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000550)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000568)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000574)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000057c)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000658)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000065c)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000066c)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000684)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000069c)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000590)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000005a4)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
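+/*
+ * Usage sketch (illustrative only): popping the NTF_TX command queue for
+ * one client by composing the CMD register fields from their _BMSK/_SHFT
+ * pairs before a single write. 'client' is a hypothetical variable.
+ *
+ *	u32 cmd = ((client << HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_SHFT) &
+ *		   HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_BMSK) |
+ *		  HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_BMSK;
+ *
+ *	HWIO_IPA_NTF_TX_CMDQ_CMD_OUT(cmd);
+ */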
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_NTF_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000624)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK 0xffffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_BMSK 0xf8000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_SHFT 0x1b
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK \
+	0x4000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked(	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
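+/*
+ * Usage sketch (illustrative only): STATUS_EMPTY reads back as a
+ * per-pipe bitmap (bits 0..30 per the 0x7fffffff mask), so a single
+ * pipe is tested by masking its bit out of the CMDQ_EMPTY field.
+ * 'pipe' is a hypothetical variable; BIT() is the usual kernel helper.
+ *
+ *	bool empty = !!HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_INM(
+ *				BIT(pipe));
+ */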
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000750)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000077c)
+#define HWIO_IPA_SPARE_REG_1_ADDR (IPA_DEBUG_REG_BASE + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_1_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_1_IN in_dword_masked(HWIO_IPA_SPARE_REG_1_ADDR, \
+						HWIO_IPA_SPARE_REG_1_RMSK)
+#define HWIO_IPA_SPARE_REG_1_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_1_OUT(v) out_dword(HWIO_IPA_SPARE_REG_1_ADDR, v)
+#define HWIO_IPA_SPARE_REG_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_1_IN)
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_BMSK 0x80000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_SHFT 0x1f
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_BMSK 0x40000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_SHFT 0x1e
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_BMSK \
+	0x20000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_SHFT 0x1d
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_BMSK \
+	0x10000000
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_SHFT \
+	0x1c
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_BMSK 0x8000000
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_SHFT 0x1b
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_SHFT \
+	0x1a
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_BMSK 0x2000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_SHFT 0x19
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_BMSK 0x1000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_SHFT 0x18
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_BMSK 0x800000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_SHFT 0x17
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_BMSK 0x400000
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_SHFT 0x16
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK \
+	0x200000
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT \
+	0x15
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_BMSK 0x100000
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_SHFT 0x14
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_BMSK 0x80000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_SHFT 0x13
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_BMSK 0x40000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_SHFT 0x12
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_BMSK 0x20000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_SHFT 0x11
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_BMSK 0x10000
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_SHFT 0x10
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_BMSK \
+	0x8000
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_SHFT \
+	0xf
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_BMSK \
+	0x4000
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_SHFT \
+	0xe
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_BMSK \
+	0x2000
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_SHFT 0xd
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_BMSK 0x1000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_SHFT 0xc
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_BMSK	\
+	0x800
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_SHFT	\
+	0xb
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_BMSK 0x400
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_SHFT 0xa
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_BMSK 0x100
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_SHFT 0x8
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_BMSK \
+	0x40
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_SHFT \
+	0x6
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_BMSK 0x20
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_SHFT 0x5
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_BMSK 0x10
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_SHFT 0x4
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_BMSK 0x8
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_SHFT 0x3
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK 0x4
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_SHFT 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_BMSK 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_SHFT 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_SHFT 0x0
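+/*
+ * Usage sketch (illustrative only): spare/chicken bits are normally
+ * flipped with a masked read-modify-write so the remaining bits of the
+ * register are preserved; _OUTM(m, v) touches only the bits in m.
+ *
+ *	HWIO_IPA_SPARE_REG_1_OUTM(
+ *		HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK,
+ *		HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK);
+ */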
+#define HWIO_IPA_SPARE_REG_2_ADDR (IPA_DEBUG_REG_BASE + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_2_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_2_IN in_dword_masked(HWIO_IPA_SPARE_REG_2_ADDR, \
+						HWIO_IPA_SPARE_REG_2_RMSK)
+#define HWIO_IPA_SPARE_REG_2_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_2_OUT(v) out_dword(HWIO_IPA_SPARE_REG_2_ADDR, v)
+#define HWIO_IPA_SPARE_REG_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_2_IN)
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_BMSK 0xfffffffc
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_SHFT 0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_SHFT \
+	0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_RMSK 0x80010000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG1_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG1_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_BMSK 0x80000000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_SHFT 0x1f
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK 0x10000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_SHFT 0x10
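+/*
+ * Usage sketch (illustrative only): the _n registers take the endpoint
+ * index as a macro argument (_INI/_OUTI/_OUTMI), and _MAXn gives the
+ * highest legal index. 'ep' is a hypothetical variable.
+ *
+ *	if (ep <= HWIO_IPA_ENDP_GSI_CFG1_n_MAXn)
+ *		HWIO_IPA_ENDP_GSI_CFG1_n_OUTMI(ep,
+ *			HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK,
+ *			HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK);
+ */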
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000920)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_SHFT 0x0
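+/*
+ * Usage sketch (illustrative only): a TLV FIFO configuration word packs
+ * the size field (bits 16..23) and base-address field (bits 0..15)
+ * together before the indexed write. 'ep', 'size' and 'base' are
+ * hypothetical variables.
+ *
+ *	u32 cfg = ((size << HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_SHFT) &
+ *		   HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_BMSK) |
+ *		  (base & HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_BMSK);
+ *
+ *	HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTI(ep, cfg);
+ */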
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_SHFT 0x0
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_CTXH_CTRL_ADDR (IPA_DEBUG_REG_BASE + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_RMSK 0x8000000f
+#define HWIO_IPA_CTXH_CTRL_ATTR 0x3
+#define HWIO_IPA_CTXH_CTRL_IN in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+					      HWIO_IPA_CTXH_CTRL_RMSK)
+#define HWIO_IPA_CTXH_CTRL_INM(m) in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+						  m)
+#define HWIO_IPA_CTXH_CTRL_OUT(v) out_dword(HWIO_IPA_CTXH_CTRL_ADDR, v)
+#define HWIO_IPA_CTXH_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_CTXH_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_CTXH_CTRL_IN)
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_BMSK 0x80000000
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_SHFT 0x1f
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_BMSK 0xf
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_SHFT 0x0
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_ADDR(m, n) (IPA_DEBUG_REG_BASE + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_PHYS(m, n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_OFFS(m, n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define IPA_EE_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00043000)
+#define IPA_EE_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00043000)
+#define IPA_EE_REG_BASE_OFFS 0x00043000
+#define HWIO_IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000008 + \
+					0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_RMSK 0x7bffffd
+#define HWIO_IPA_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_BMSK 0x4000000
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_SHFT 0x1a
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_BMSK 0x2000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_SHFT 0x19
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_BMSK 0x1000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_SHFT 0x18
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_BMSK 0x800000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_SHFT 0x17
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_BMSK 0x200000
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_SHFT 0x15
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_BMSK 0x100000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_SHFT 0x14
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_BMSK 0x80000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_SHFT 0x13
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_BMSK 0x40000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_SHFT 0x12
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_BMSK 0x20000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_SHFT 0x11
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_BMSK 0x10000
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_SHFT 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_BMSK 0x8000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_SHFT 0xf
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK 0x4000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_SHFT 0xe
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_BMSK 0x2000
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_SHFT 0xd
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_BMSK 0x1000
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_SHFT 0xc
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_BMSK 0x800
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_SHFT 0xb
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_BMSK 0x400
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_SHFT 0xa
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_BMSK 0x200
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_SHFT 0x9
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_BMSK 0x100
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_SHFT 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_BMSK 0x80
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_SHFT 0x7
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_BMSK 0x40
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_SHFT 0x6
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_BMSK 0x20
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_SHFT 0x5
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_BMSK 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_SHFT 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_BMSK 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_SHFT 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_BMSK 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_SHFT 0x2
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_BMSK 0x1
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_SHFT 0x0
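+/*
+ * Usage sketch (illustrative only): enabling one interrupt source for a
+ * given execution environment with a masked indexed write, leaving the
+ * other enable bits untouched. 'ee' is a hypothetical variable
+ * (0..HWIO_IPA_IRQ_EN_EE_n_MAXn).
+ *
+ *	HWIO_IPA_IRQ_EN_EE_n_OUTMI(ee,
+ *		HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK,
+ *		HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK);
+ */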
+#define HWIO_IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000018 + \
+					0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_RMSK 0x8000f1ff
+#define HWIO_IPA_SNOC_FEC_EE_n_MAXn 3
+#define HWIO_IPA_SNOC_FEC_EE_n_ATTR 0x1
+#define HWIO_IPA_SNOC_FEC_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		HWIO_IPA_SNOC_FEC_EE_n_RMSK)
+#define HWIO_IPA_SNOC_FEC_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_BMSK 0x80000000
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_SHFT 0x1f
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_BMSK 0xf000
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_SHFT 0xc
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_BMSK 0x100
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_SHFT 0x8
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_BMSK 0xff
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_SHFT 0x0
+#define HWIO_IPA_IRQ_EE_UC_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000020 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ADDR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ADDR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ADDR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ADDR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_SHFT 0x0
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000024 + \
+					    0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000028 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ATTR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ATTR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ATTR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ATTR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ATTR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_BMSK 0xffffffc0
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_SHFT 0x6
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_BMSK 0x3f
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE +	\
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0xd
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR (IPA_EE_REG_BASE + 0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR (IPA_EE_REG_BASE + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR (IPA_EE_REG_BASE + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR (IPA_EE_REG_BASE + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR (IPA_EE_REG_BASE + 0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK 0x1ffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR (IPA_EE_REG_BASE + 0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_BMSK 0xffff0000
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_SHFT 0x0
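+/*
+ * The status-log block above splits 64-bit addresses across _ADDR/_ADDR_MSB
+ * register pairs and packs both ring pointers into one RAM_PTR register.
+ * A plausible decode (illustrative sketch only):
+ *
+ *	u32 v  = HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_IN;
+ *	u32 wr = (v & HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_BMSK) >>
+ *		 HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_SHFT;
+ *	u32 rd = (v & HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_BMSK) >>
+ *		 HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_SHFT;
+ */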
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010cc)
+#define HWIO_IPA_SECURED_PIPES_ADDR (IPA_EE_REG_BASE + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_PHYS (IPA_EE_REG_BASE_PHYS + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_OFFS (IPA_EE_REG_BASE_OFFS + 0x000010d0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_ADDR (IPA_EE_REG_BASE + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010d4)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_ADDR (IPA_EE_REG_BASE + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_PHYS (IPA_EE_REG_BASE_PHYS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_OFFS (IPA_EE_REG_BASE_OFFS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_ADDR (IPA_EE_REG_BASE + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x00001204)
+#define HWIO_IPA_SET_UC_IRQ_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00002048 + \
+					  0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_ADDR (IPA_EE_REG_BASE + 0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x00002058)
+#define HWIO_IPA_UCP_RESUME_ADDR (IPA_EE_REG_BASE + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a0)
+#define HWIO_IPA_PROC_UCP_CFG_ADDR (IPA_EE_REG_BASE + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a4)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_ADDR (IPA_EE_REG_BASE + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030b0)
+#define IPA_UC_IPA_UC_PER_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+					 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_OFFS 0x000c0000
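+/*
+ * Registers from here on live in the IPA uC (microcontroller) peripheral
+ * block at offset 0xc0000 of the IPA wrapper, and follow the same
+ * _ADDR/_PHYS/_OFFS triple as the EE region above.
+ */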
+#define HWIO_IPA_UC_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000000)
+#define HWIO_IPA_UC_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000000)
+#define HWIO_IPA_UC_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000000)
+#define HWIO_IPA_UC_CONTROL_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000004)
+#define HWIO_IPA_UC_CONTROL_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				  0x00000004)
+#define HWIO_IPA_UC_CONTROL_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				  0x00000004)
+#define HWIO_IPA_UC_BASE_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000000c)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_UC_PEND_IRQ_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000014)
+#define HWIO_IPA_UC_TRACE_BUFFER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000018)
+#define HWIO_IPA_UC_PC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x0000001c)
+#define HWIO_IPA_UC_PC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_UC_PC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_IN)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_BMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LENGTH_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_RMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LENGTH_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		HWIO_IPA_UC_QMB_LENGTH_RMSK)
+#define HWIO_IPA_UC_QMB_LENGTH_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LENGTH_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LENGTH_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LENGTH_IN)
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_BMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_RMSK 0x31
+#define HWIO_IPA_UC_QMB_TRIGGER_ATTR 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		HWIO_IPA_UC_QMB_TRIGGER_RMSK)
+#define HWIO_IPA_UC_QMB_TRIGGER_INM(m) in_dword_masked(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUTM(m, v) out_dword_masked_ns(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_TRIGGER_IN)
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_BMSK 0x30
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_SHFT 0x4
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_POSTED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_POSTED_FVAL 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_COMPLETE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_COMPLETE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_BMSK 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_READ_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_WRITE_FVAL 0x1
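+/*
+ * The _FVAL macros enumerate the legal encodings of a field before
+ * shifting.  Composing a QMB write trigger would look roughly like this
+ * (an assumed sketch following the mask/shift convention):
+ *
+ *	u32 trig = (HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_POSTED_FVAL <<
+ *		    HWIO_IPA_UC_QMB_TRIGGER_POSTING_SHFT) |
+ *		   (HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_WRITE_FVAL <<
+ *		    HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_SHFT);
+ *	HWIO_IPA_UC_QMB_TRIGGER_OUT(trig);
+ */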
+#define HWIO_IPA_UC_QMB_PENDING_TID_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_RMSK 0x11113f
+#define HWIO_IPA_UC_QMB_PENDING_TID_ATTR 0x1
+#define HWIO_IPA_UC_QMB_PENDING_TID_IN in_dword_masked(	\
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		HWIO_IPA_UC_QMB_PENDING_TID_RMSK)
+#define HWIO_IPA_UC_QMB_PENDING_TID_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_SHFT 0x14
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_BMSK 0x10000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_SHFT 0x10
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_SHFT 0xc
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_BMSK 0x100
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_SHFT 0x8
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_MISC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_RMSK 0xf11333ff
+#define HWIO_IPA_UC_QMB_MISC_ATTR 0x3
+#define HWIO_IPA_UC_QMB_MISC_IN in_dword_masked(HWIO_IPA_UC_QMB_MISC_ADDR, \
+						HWIO_IPA_UC_QMB_MISC_RMSK)
+#define HWIO_IPA_UC_QMB_MISC_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_MISC_OUT(v) out_dword(HWIO_IPA_UC_QMB_MISC_ADDR, v)
+#define HWIO_IPA_UC_QMB_MISC_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_MISC_IN)
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_BMSK 0x80000000
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_SHFT 0x1f
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_BMSK 0x40000000
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_SHFT 0x1e
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_BMSK 0x20000000
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_SHFT 0x1d
+#define HWIO_IPA_UC_QMB_MISC_SWAP_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_MISC_SWAP_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_BMSK 0x1000000
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_SHFT 0x18
+#define HWIO_IPA_UC_QMB_MISC_OOORD_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_MISC_OOORD_SHFT 0x14
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_BMSK 0x30000
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_SHFT 0x10
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_BMSK 0x3000
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_SHFT 0xc
+#define HWIO_IPA_UC_QMB_MISC_USER_BMSK 0x3ff
+#define HWIO_IPA_UC_QMB_MISC_USER_SHFT 0x0
+#define HWIO_IPA_UC_QMB_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_RMSK 0x1fff1fff
+#define HWIO_IPA_UC_QMB_STATUS_ATTR 0x1
+#define HWIO_IPA_UC_QMB_STATUS_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		HWIO_IPA_UC_QMB_STATUS_RMSK)
+#define HWIO_IPA_UC_QMB_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_BMSK 0xf000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_SHFT 0x18
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_BMSK 0xf00000
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_SHFT 0x14
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_BMSK 0xf0000
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_SHFT 0x10
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_SHFT 0xc
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_BMSK 0xf00
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_SHFT 0x8
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_BMSK 0xf0
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_SHFT 0x4
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_BMSK 0xf
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK 0x1117
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_IN)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_SHFT 0xc
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_BMSK 0x100
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_SHFT 0x8
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_BMSK 0x10
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_SHFT 0x4
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_BMSK 0x7
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_STRONGLY_ORDERED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_DEVICE_FVAL 0x1
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_NON_CACHEABLE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_WRITEALLOCATE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_WRITETHROUGH_NOALLOCATE_FVAL	\
+	0x6
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_NOALLOCATE_FVAL 0x7
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					     0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_HWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000408)
+#define HWIO_IPA_UC_SWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000418)
+#define HWIO_IPA_UC_VUIC_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000420)
+#define HWIO_IPA_UC_TIMER_CTRL_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_EVENTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000600)
+#define HWIO_IPA_UC_EVENTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000600)
+#define HWIO_IPA_UC_EVENTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000600)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000710)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000714)
+#define HWIO_IPA_UC_PC_RESTORE_WR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_RD_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000071c)
+#define HWIO_IPA_UC_SPARE_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00001ffc)
+#define HWIO_IPA_UC_SPARE_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				0x00001ffc)
+#define HWIO_IPA_UC_SPARE_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				0x00001ffc)
+#endif

+ 2963 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h

@@ -0,0 +1,2963 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_DEF_H_)
+#define _IPA_HWIO_DEF_H_
+struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s	def;
+	u32						value;
+};
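+/*
+ * Every register in this file follows the pattern above: a *_s struct
+ * laying out the hardware fields LSB-first as C bitfields, wrapped in a
+ * *_u union so the same storage can be read or written as a raw u32.
+ * Assuming the usual pairing with the HWIO accessor macros (illustrative
+ * only; the _IN macro name below is inferred from the naming convention):
+ *
+ *	union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u cfg;
+ *
+ *	cfg.value = HWIO_IPA_GSI_TOP_GSI_CFG_IN;
+ *	if (cfg.def.gsi_enable)
+ *		...;
+ */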
+struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s {
+	u32 inst_byte_0 : 8;
+	u32 inst_byte_1 : 8;
+	u32 inst_byte_2 : 8;
+	u32 inst_byte_3 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s {
+	u32	csr_busy : 1;
+	u32	ree_busy : 1;
+	u32	mcs_busy : 1;
+	u32	timer_busy : 1;
+	u32	rd_wr_busy : 1;
+	u32	ev_eng_busy : 1;
+	u32	int_eng_busy : 1;
+	u32	ree_pwr_clps_busy : 1;
+	u32	db_eng_busy : 1;
+	u32	dbg_cnt_busy : 1;
+	u32	uc_busy : 1;
+	u32	ic_busy : 1;
+	u32	sdma_busy : 1;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s {
+	u32	mcs_stall : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s {
+	u32	err_write : 1;
+	u32	reserved0 : 7;
+	u32	err_tid : 8;
+	u32	err_mid : 8;
+	u32	err_saved : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s def;
+	u32							value;
+};
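+/*
+ * CNTXT_1..7 below mirror the rest of the GSI channel context as the
+ * hardware sees it: CNTXT_1 holds the ring length, CNTXT_2/3 the 64-bit
+ * ring base address split into LSB/MSB words, and CNTXT_4..7 the read and
+ * write pointers in the same LSB/MSB layout.
+ */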
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_comp_hw_version_s {
+	u32	step : 16;
+	u32	minor : 12;
+	u32	major : 4;
+};
+union ipa_hwio_def_ipa_comp_hw_version_u {
+	struct ipa_hwio_def_ipa_comp_hw_version_s	def;
+	u32						value;
+};
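+/*
+ * Editor's sketch (not part of the original snapshot): the HW version
+ * register packs step/minor/major into one word; per the layout above,
+ * major occupies the top 4 bits.  Hypothetical decode helper:
+ */
+static inline u32 ipa_hwio_hw_version_major(u32 raw)
+{
+	union ipa_hwio_def_ipa_comp_hw_version_u ver;
+
+	ver.value = raw;	/* raw IPA_COMP_HW_VERSION word */
+	return ver.def.major;	/* bits [31:28] per the struct above */
+}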
+struct ipa_hwio_def_ipa_comp_cfg_s {
+	u32	reserved0 : 1;
+	u32	gsi_snoc_bypass_dis : 1;
+	u32	gen_qmb_0_snoc_bypass_dis : 1;
+	u32	gen_qmb_1_snoc_bypass_dis : 1;
+	u32	reserved1 : 1;
+	u32	ipa_qmb_select_by_address_cons_en : 1;
+	u32	ipa_qmb_select_by_address_prod_en : 1;
+	u32	gsi_multi_inorder_rd_dis : 1;
+	u32	gsi_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_1_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_0_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_1_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_multi_axi_masters_dis : 1;
+	u32	ipa_qmb_select_by_address_global_en : 1;
+	u32	ipa_atomic_fetcher_arb_lock_dis : 4;
+	u32	ipa_full_flush_wait_rsc_closure_en : 1;
+	u32	reserved2 : 10;
+};
+union ipa_hwio_def_ipa_comp_cfg_u {
+	struct ipa_hwio_def_ipa_comp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_route_s {
+	u32	route_dis : 1;
+	u32	route_def_pipe : 5;
+	u32	route_def_hdr_table : 1;
+	u32	route_def_hdr_ofst : 10;
+	u32	route_frag_def_pipe : 5;
+	u32	reserved0 : 2;
+	u32	route_def_retain_hdr : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_route_u {
+	struct ipa_hwio_def_ipa_route_s def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_proc_iph_cfg_s {
+	u32	iph_threshold : 2;
+	u32	iph_pipelining_disable : 1;
+	u32	reserved0 : 1;
+	u32	status_from_iph_frst_always : 1;
+	u32	iph_nat_blind_invalidate_tport_offset_disable : 1;
+	u32	pipestage_overlap_disable : 1;
+	u32	ftch_dcph_overlap_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_hop : 1;
+	u32	iph_pkt_parser_protocol_stop_dest : 1;
+	u32	iph_pkt_parser_ihl_to_2nd_frag_en : 1;
+	u32	reserved1 : 4;
+	u32	iph_pkt_parser_protocol_stop_value : 8;
+	u32	d_dcph_multi_engine_disable : 1;
+	u32	reserved2 : 7;
+};
+union ipa_hwio_def_ipa_proc_iph_cfg_u {
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_lsb_s {
+	u32 tod_lsb : 32;
+};
+union ipa_hwio_def_ipa_dpl_timer_lsb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_msb_s {
+	u32	tod_msb : 16;
+	u32	reserved0 : 15;
+	u32	timer_en : 1;
+};
+union ipa_hwio_def_ipa_dpl_timer_msb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_tx_wrapper_s {
+	u32	tx0_idle : 1;
+	u32	tx1_idle : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_empty : 1;
+	u32	ipa_prod_bresp_toggle_idle : 1;
+	u32	ipa_mbim_pkt_fms_idle : 1;
+	u32	mbim_direct_dma : 2;
+	u32	trnseq_force_valid : 1;
+	u32	pkt_drop_cnt_idle : 1;
+	u32	nlo_direct_dma : 2;
+	u32	coal_direct_dma : 2;
+	u32	coal_slave_idle : 1;
+	u32	coal_slave_ctx_idle : 1;
+	u32	reserved0 : 8;
+	u32	coal_slave_open_frame : 4;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_state_tx_wrapper_u {
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx1_s {
+	u32	flopped_arbit_type : 3;
+	u32	arbit_type : 3;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_rst_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	suspend_req_empty : 1;
+};
+union ipa_hwio_def_ipa_state_tx1_u {
+	struct ipa_hwio_def_ipa_state_tx1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_s {
+	u32	ipa_hps_ftch_state_idle : 1;
+	u32	ipa_hps_ftch_alloc_state_idle : 1;
+	u32	ipa_hps_ftch_pkt_state_idle : 1;
+	u32	ipa_hps_ftch_imm_state_idle : 1;
+	u32	ipa_hps_ftch_cmplt_state_idle : 1;
+	u32	ipa_hps_dmar_state_idle : 7;
+	u32	ipa_hps_dmar_slot_state_idle : 7;
+	u32	ipa_hps_imm_cmd_exec_state_idle : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_state_fetcher_u {
+	struct ipa_hwio_def_ipa_state_fetcher_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_0_s {
+	u32	mask_queue_dmar_uses_queue : 8;
+	u32	mask_queue_imm_exec : 8;
+	u32	mask_queue_no_resources_context : 8;
+	u32	mask_queue_no_resources_hps_dmar : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_0_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_1_s {
+	u32	mask_queue_no_resources_ack_entry : 8;
+	u32	mask_queue_arb_lock : 8;
+	u32	mask_queue_step_mode : 8;
+	u32	mask_queue_no_space_dpl_fifo : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_1_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dpl_fifo_s {
+	u32	pop_fsm_state : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_state_dpl_fifo_u {
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_s {
+	u32	vp_vld : 4;
+	u32	main_fsm_state : 4;
+	u32	find_open_fsm_state : 4;
+	u32	hash_calc_fsm_state : 4;
+	u32	check_fit_fsm_state : 4;
+	u32	init_vp_fsm_state : 4;
+	u32	lru_vp : 4;
+	u32	vp_timer_expired : 4;
+};
+union ipa_hwio_def_ipa_state_coal_master_u {
+	struct ipa_hwio_def_ipa_state_coal_master_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dfetcher_s {
+	u32	ipa_dps_ftch_pkt_state_idle : 1;
+	u32	ipa_dps_ftch_cmplt_state_idle : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dmar_state_idle : 6;
+	u32	reserved1 : 2;
+	u32	ipa_dps_dmar_slot_state_idle : 6;
+	u32	reserved2 : 14;
+};
+union ipa_hwio_def_ipa_state_dfetcher_u {
+	struct ipa_hwio_def_ipa_state_dfetcher_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_acl_s {
+	u32	ipa_hps_h_dcph_empty : 1;
+	u32	ipa_hps_h_dcph_active : 1;
+	u32	ipa_hps_pkt_parser_empty : 1;
+	u32	ipa_hps_pkt_parser_active : 1;
+	u32	ipa_hps_filter_nat_empty : 1;
+	u32	ipa_hps_filter_nat_active : 1;
+	u32	ipa_hps_router_empty : 1;
+	u32	ipa_hps_router_active : 1;
+	u32	ipa_hps_hdri_empty : 1;
+	u32	ipa_hps_hdri_active : 1;
+	u32	ipa_hps_ucp_empty : 1;
+	u32	ipa_hps_ucp_active : 1;
+	u32	ipa_hps_enqueuer_empty : 1;
+	u32	ipa_hps_enqueuer_active : 1;
+	u32	ipa_dps_d_dcph_empty : 1;
+	u32	ipa_dps_d_dcph_active : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dispatcher_empty : 1;
+	u32	ipa_dps_dispatcher_active : 1;
+	u32	ipa_dps_d_dcph_2_empty : 1;
+	u32	ipa_dps_d_dcph_2_active : 1;
+	u32	ipa_hps_sequencer_idle : 1;
+	u32	ipa_dps_sequencer_idle : 1;
+	u32	ipa_dps_d_dcph_2nd_empty : 1;
+	u32	ipa_dps_d_dcph_2nd_active : 1;
+	u32	ipa_hps_coal_master_empty : 1;
+	u32	ipa_hps_coal_master_active : 1;
+	u32	reserved1 : 4;
+};
+union ipa_hwio_def_ipa_state_acl_u {
+	struct ipa_hwio_def_ipa_state_acl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_tlv_s {
+	u32	ipa_gsi_toggle_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_tlv_u {
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_aos_s {
+	u32	ipa_gsi_aos_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_aos_u {
+	struct ipa_hwio_def_ipa_state_gsi_aos_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_s {
+	u32	ipa_gsi_prod_fsm_tx_0 : 4;
+	u32	ipa_gsi_prod_fsm_tx_1 : 4;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_state_gsi_if_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_skip_s {
+	u32	ipa_gsi_skip_fsm : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_state_gsi_skip_u {
+	struct ipa_hwio_def_ipa_state_gsi_skip_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_cons_s {
+	u32	state : 1;
+	u32	cache_vld : 6;
+	u32	rx_req : 10;
+	u32	rx_req_no_zero : 10;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_state_gsi_if_cons_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_cons_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_s {
+	u32	rx_wait : 1;
+	u32	rx_idle : 1;
+	u32	tx_idle : 1;
+	u32	dpl_fifo_idle : 1;
+	u32	bam_gsi_idle : 1;
+	u32	ipa_status_sniffer_idle : 1;
+	u32	ipa_noc_idle : 1;
+	u32	aggr_idle : 1;
+	u32	mbim_aggr_idle : 1;
+	u32	ipa_rsrc_mngr_db_empty : 1;
+	u32	ipa_rsrc_state_idle : 1;
+	u32	ipa_ackmngr_db_empty : 1;
+	u32	ipa_ackmngr_state_idle : 1;
+	u32	ipa_tx_ackq_full : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_idle : 1;
+	u32	ipa_full_idle : 1;
+	u32	ipa_ntf_tx_empty : 1;
+	u32	ipa_tx_ackq_empty : 1;
+	u32	ipa_uc_ackq_empty : 1;
+	u32	ipa_rx_ackq_empty : 1;
+	u32	ipa_tx_commander_cmdq_empty : 1;
+	u32	ipa_rx_splt_cmdq_empty : 4;
+	u32	reserved0 : 1;
+	u32	ipa_rx_hps_empty : 1;
+	u32	ipa_hps_dps_empty : 1;
+	u32	ipa_dps_tx_empty : 1;
+	u32	ipa_uc_rx_hnd_cmdq_empty : 1;
+};
+union ipa_hwio_def_ipa_state_u {
+	struct ipa_hwio_def_ipa_state_s def;
+	u32				value;
+};
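+/*
+ * Editor's sketch (not part of the original snapshot): IPA_STATE
+ * exposes per-block idle/empty bits; a quiesce check decodes them
+ * through the union instead of open-coded shift/mask arithmetic.
+ * Hypothetical helper:
+ */
+static inline u32 ipa_hwio_state_core_idle(u32 raw)
+{
+	union ipa_hwio_def_ipa_state_u st;
+
+	st.value = raw;			/* raw IPA_STATE word */
+	return st.def.rx_idle && st.def.tx_idle &&
+		st.def.aggr_idle;	/* RX, TX and aggregation idle */
+}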
+struct ipa_hwio_def_ipa_state_rx_active_s {
+	u32	endpoints : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_state_rx_active_u {
+	struct ipa_hwio_def_ipa_state_rx_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx0_s {
+	u32	last_arbit_type : 2;
+	u32	next_arbit_type : 2;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	reserved0 : 4;
+};
+union ipa_hwio_def_ipa_state_tx0_u {
+	struct ipa_hwio_def_ipa_state_tx0_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_aggr_active_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_state_aggr_active_u {
+	struct ipa_hwio_def_ipa_state_aggr_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_nlo_aggr_s {
+	u32 nlo_aggr_state : 32;
+};
+union ipa_hwio_def_ipa_state_nlo_aggr_u {
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_1_s {
+	u32	init_vp_wr_ctx_line : 6;
+	u32	init_vp_rd_pkt_line : 6;
+	u32	init_vp_fsm_state : 4;
+	u32	check_fit_rd_ctx_line : 6;
+	u32	check_fit_fsm_state : 4;
+	u32	arbiter_state : 4;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_state_coal_master_1_u {
+	struct ipa_hwio_def_ipa_state_coal_master_1_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_filt_rout_hash_en_s {
+	u32	ipv6_router_hash_en : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_en : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_en : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_en : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_en_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_filt_rout_hash_flush_s {
+	u32	ipv6_router_hash_flush : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_flush : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_flush : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_flush : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_flush_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ipv4_filter_init_values_s {
+	u32	ip_v4_filter_init_hashed_addr : 16;
+	u32	ip_v4_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_filter_init_values_s {
+	u32	ip_v6_filter_init_hashed_addr : 16;
+	u32	ip_v6_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv4_route_init_values_s {
+	u32	ip_v4_route_init_hashed_addr : 16;
+	u32	ip_v4_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_route_init_values_s {
+	u32	ip_v6_route_init_hashed_addr : 16;
+	u32	ip_v6_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_bam_activated_ports_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_bam_activated_ports_u {
+	struct ipa_hwio_def_ipa_bam_activated_ports_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 29;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 15;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s {
+	u32	src_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
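+/*
+ * Editor's sketch (not part of the original snapshot): each
+ * SRC_RSRC_GRP register packs min/max limits for two resource groups
+ * into one word; composing a value through the union keeps the 6-bit
+ * field packing out of the caller.  Hypothetical helper:
+ */
+static inline u32 ipa_hwio_src_rsrc_grp_01_pack(u32 min0, u32 max0,
+						u32 min1, u32 max1)
+{
+	union ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_u rsrc = { 0 };
+
+	rsrc.def.src_rsrc_grp_0_min_limit = min0;	/* 6-bit field */
+	rsrc.def.src_rsrc_grp_0_max_limit = max0;
+	rsrc.def.src_rsrc_grp_1_min_limit = min1;
+	rsrc.def.src_rsrc_grp_1_max_limit = max1;
+	return rsrc.value;	/* ready to write to the register */
+}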
+struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s {
+	u32	src_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_45_rsrc_type_n_s {
+	u32 src_rsrc_grp_4_min_limit : 6;
+	u32 reserved0 : 2;
+	u32 src_rsrc_grp_4_max_limit : 6;
+	u32 reserved1 : 18;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_45_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_45_rsrc_type_n_s	def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	src_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_4567_rsrc_type_cnt_n_s {
+	u32 src_rsrc_grp_4_cnt : 6;
+	u32 reserved0 : 26;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_4567_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_4567_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s {
+	u32	dst_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s {
+	u32	dst_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_45_rsrc_type_n_s {
+	u32 dst_rsrc_grp_4_min_limit : 6;
+	u32 reserved0 : 2;
+	u32 dst_rsrc_grp_4_max_limit : 6;
+	u32 reserved1 : 18;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_45_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_45_rsrc_type_n_s	def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	dst_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n_s {
+	u32 dst_rsrc_grp_4_cnt : 8;
+	u32 reserved0 : 24;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_rsrc_grp_cfg_s {
+	u32	src_grp_special_valid : 1;
+	u32	reserved0 : 3;
+	u32	src_grp_special_index : 3;
+	u32	reserved1 : 1;
+	u32	dst_pipe_special_valid : 1;
+	u32	reserved2 : 3;
+	u32	dst_pipe_special_index : 5;
+	u32	reserved3 : 3;
+	u32	dst_grp_special_valid : 1;
+	u32	reserved4 : 3;
+	u32	dst_grp_special_index : 6;
+	u32	reserved5 : 2;
+};
+union ipa_hwio_def_ipa_rsrc_grp_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_pipeline_disable_s {
+	u32	reserved0 : 3;
+	u32	rx_cmdq_splitter_dis : 1;
+	u32	reserved1 : 28;
+};
+union ipa_hwio_def_ipa_pipeline_disable_u {
+	struct ipa_hwio_def_ipa_pipeline_disable_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_ctrl_n_s {
+	u32	endp_suspend : 1;
+	u32	endp_delay : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s {
+	u32	reserved0 : 1;
+	u32	endp_delay : 1;
+	u32	reserved1 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_cfg_n_s {
+	u32	frag_offload_en : 1;
+	u32	cs_offload_en : 2;
+	u32	cs_metadata_hdr_offset : 4;
+	u32	reserved0 : 1;
+	u32	gen_qmb_master_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_nat_n_s {
+	u32	nat_en : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_nat_n_u {
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_n_s {
+	u32	hdr_len : 6;
+	u32	hdr_ofst_metadata_valid : 1;
+	u32	hdr_ofst_metadata : 6;
+	u32	hdr_additional_const_len : 6;
+	u32	hdr_ofst_pkt_size_valid : 1;
+	u32	hdr_ofst_pkt_size : 6;
+	u32	hdr_a5_mux : 1;
+	u32	hdr_len_inc_deagg_hdr : 1;
+	u32	hdr_len_msb : 2;
+	u32	hdr_ofst_metadata_msb : 2;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s {
+	u32	hdr_endianness : 1;
+	u32	hdr_total_len_or_pad_valid : 1;
+	u32	hdr_total_len_or_pad : 1;
+	u32	hdr_payload_len_inc_padding : 1;
+	u32	hdr_total_len_or_pad_offset : 6;
+	u32	hdr_pad_to_alignment : 4;
+	u32	reserved0 : 2;
+	u32	hdr_total_len_or_pad_offset_msb : 2;
+	u32	hdr_ofst_pkt_size_msb : 2;
+	u32	hdr_additional_const_len_msb : 2;
+	u32	reserved1 : 10;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_ext_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s {
+	u32 metadata_mask : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s {
+	u32 metadata : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_mode_n_s {
+	u32	mode : 3;
+	u32	dcph_enable : 1;
+	u32	dest_pipe_index : 5;
+	u32	reserved0 : 3;
+	u32	byte_threshold : 16;
+	u32	pipe_replicate_en : 1;
+	u32	pad_en : 1;
+	u32	reserved1 : 2;
+};
+union ipa_hwio_def_ipa_endp_init_mode_n_u {
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_aggr_n_s {
+	u32	aggr_en : 2;
+	u32	aggr_type : 3;
+	u32	aggr_byte_limit : 6;
+	u32	reserved0 : 1;
+	u32	aggr_time_limit : 5;
+	u32	aggr_pkt_limit : 6;
+	u32	aggr_sw_eof_active : 1;
+	u32	aggr_force_close : 1;
+	u32	reserved1 : 1;
+	u32	aggr_hard_byte_limit_enable : 1;
+	u32	aggr_gran_sel : 1;
+	u32	reserved2 : 4;
+};
+union ipa_hwio_def_ipa_endp_init_aggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s	def;
+	u32						value;
+};
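+/*
+ * Editor's sketch (not part of the original snapshot): composing an
+ * ENDP_INIT_AGGR value through the union keeps the 2/3/6/5/6-bit
+ * field packing with the layout above.  Field semantics (units,
+ * granularity) are hardware-defined and not asserted here.
+ */
+static inline u32 ipa_hwio_endp_aggr_pack(u32 en, u32 type,
+					  u32 byte_limit, u32 pkt_limit)
+{
+	union ipa_hwio_def_ipa_endp_init_aggr_n_u aggr = { 0 };
+
+	aggr.def.aggr_en = en;			/* 2-bit enable/mode */
+	aggr.def.aggr_type = type;		/* 3-bit aggregation type */
+	aggr.def.aggr_byte_limit = byte_limit;	/* 6-bit byte limit */
+	aggr.def.aggr_pkt_limit = pkt_limit;	/* 6-bit packet limit */
+	return aggr.value;	/* ready for the register write */
+}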
+struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s {
+	u32	en : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_en_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s {
+	u32	time_limit : 5;
+	u32	reserved0 : 3;
+	u32	gran_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_timer_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_deaggr_n_s {
+	u32	deaggr_hdr_len : 6;
+	u32	syspipe_err_detection : 1;
+	u32	packet_offset_valid : 1;
+	u32	packet_offset_location : 6;
+	u32	ignore_min_pkt_err : 1;
+	u32	reserved0 : 1;
+	u32	max_packet_len : 16;
+};
+union ipa_hwio_def_ipa_endp_init_deaggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s {
+	u32	rsrc_grp : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_endp_init_rsrc_grp_n_u {
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_seq_n_s {
+	u32	hps_seq_type : 4;
+	u32	dps_seq_type : 4;
+	u32	hps_rep_seq_type : 4;
+	u32	dps_rep_seq_type : 4;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_endp_init_seq_n_u {
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_status_n_s {
+	u32	status_en : 1;
+	u32	status_endp : 5;
+	u32	reserved0 : 3;
+	u32	status_pkt_suppress : 1;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_endp_status_n_u {
+	struct ipa_hwio_def_ipa_endp_status_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s {
+	u32	filter_hash_msk_src_id : 1;
+	u32	filter_hash_msk_src_ip_add : 1;
+	u32	filter_hash_msk_dst_ip_add : 1;
+	u32	filter_hash_msk_src_port : 1;
+	u32	filter_hash_msk_dst_port : 1;
+	u32	filter_hash_msk_protocol : 1;
+	u32	filter_hash_msk_metadata : 1;
+	u32	reserved0 : 9;
+	u32	router_hash_msk_src_id : 1;
+	u32	router_hash_msk_src_ip_add : 1;
+	u32	router_hash_msk_dst_ip_add : 1;
+	u32	router_hash_msk_src_port : 1;
+	u32	router_hash_msk_dst_port : 1;
+	u32	router_hash_msk_protocol : 1;
+	u32	router_hash_msk_metadata : 1;
+	u32	reserved1 : 9;
+};
+union ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s	def;
+	u32							value;
+};
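+/*
+ * Editor's sketch (not part of the original snapshot): the filter and
+ * router hash tuple masks share one per-endpoint register; selecting
+ * the classic 5-tuple for the filter hash can go through the union.
+ * Hypothetical helper:
+ */
+static inline u32 ipa_hwio_flt_hash_5tuple(void)
+{
+	union ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_u cfg = { 0 };
+
+	cfg.def.filter_hash_msk_src_ip_add = 1;
+	cfg.def.filter_hash_msk_dst_ip_add = 1;
+	cfg.def.filter_hash_msk_src_port = 1;
+	cfg.def.filter_hash_msk_dst_port = 1;
+	cfg.def.filter_hash_msk_protocol = 1;
+	return cfg.value;	/* router-hash half left at zero */
+}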
+struct ipa_hwio_def_ipa_nlo_pp_cfg1_s {
+	u32	nlo_ack_pp : 8;
+	u32	nlo_data_pp : 8;
+	u32	nlo_status_pp : 8;
+	u32	nlo_ack_max_vp : 6;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg1_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_cfg2_s {
+	u32	nlo_ack_close_padd : 8;
+	u32	nlo_data_close_padd : 8;
+	u32	nlo_ack_buffer_mode : 1;
+	u32	nlo_data_buffer_mode : 1;
+	u32	nlo_status_buffer_mode : 1;
+	u32	reserved0 : 13;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg2_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s {
+	u32	nlo_ack_lower_size : 16;
+	u32	nlo_ack_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s {
+	u32	nlo_data_lower_size : 16;
+	u32	nlo_data_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s {
+	u32	nlo_ack_min_dsm_len : 16;
+	u32	nlo_data_min_dsm_len : 16;
+};
+union ipa_hwio_def_ipa_nlo_min_dsm_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_req_s {
+	u32	vp_flush_pp_indx : 8;
+	u32	reserved0 : 8;
+	u32	vp_flush_vp_indx : 8;
+	u32	reserved1 : 7;
+	u32	vp_flush_req : 1;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_req_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s {
+	u32 vp_flush_cookie : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_cookie_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s {
+	u32	vp_flush_ack : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_ack_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s {
+	u32 vp_dsm_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_dsm_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s {
+	u32 vp_qbap_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_qbap_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s {
+	u32	rsrc_grp_sel : 3;
+	u32	reserved0 : 1;
+	u32	rsrc_type_sel : 3;
+	u32	reserved1 : 1;
+	u32	rsrc_id_sel : 6;
+	u32	reserved2 : 18;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s {
+	u32	rsrc_occupied : 1;
+	u32	rsrc_next_valid : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_next_index : 6;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s {
+	u32	rsrc_list_valid : 1;
+	u32	rsrc_list_hold : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_list_head_rsrc : 6;
+	u32	reserved1 : 2;
+	u32	rsrc_list_head_cnt : 7;
+	u32	reserved2 : 1;
+	u32	rsrc_list_entry_cnt : 7;
+	u32	reserved3 : 5;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_debug_data_s {
+	u32 debug_data : 32;
+};
+union ipa_hwio_def_ipa_debug_data_u {
+	struct ipa_hwio_def_ipa_debug_data_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_testbus_sel_s {
+	u32	testbus_en : 1;
+	u32	reserved0 : 3;
+	u32	external_block_select : 8;
+	u32	internal_block_select : 8;
+	u32	pipe_select : 5;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_testbus_sel_u {
+	struct ipa_hwio_def_ipa_testbus_sel_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_step_mode_breakpoints_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_breakpoints_u {
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_step_mode_status_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_status_u {
+	struct ipa_hwio_def_ipa_step_mode_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_s {
+	u32	reserved0 : 1;
+	u32	log_en : 1;
+	u32	reserved1 : 2;
+	u32	log_pipe : 5;
+	u32	reserved2 : 3;
+	u32	log_length : 8;
+	u32	log_reduction_en : 1;
+	u32	log_dpl_l2_remove_en : 1;
+	u32	reserved3 : 10;
+};
+union ipa_hwio_def_ipa_log_u {
+	struct ipa_hwio_def_ipa_log_s	def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	skip_ddr_dma : 1;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s {
+	u32	read_ptr : 14;
+	u32	reserved0 : 2;
+	u32	write_ptr : 14;
+	u32	reserved1 : 1;
+	u32	skip_ddr_wrap_happened : 1;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	release_rd_cmd : 1;
+	u32	release_wr_cmd : 1;
+	u32	release_rd_pkt : 1;
+	u32	release_wr_pkt : 1;
+	u32	release_rd_pkt_enhanced : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s {
+	u32	block_rd : 1;
+	u32	block_wr : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_count : 2;
+	u32	cmdq_depth : 2;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_status_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_tx_commander_cmdq_status_u {
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 3;
+	u32	rd_req : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s {
+	u32	block_wr : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s {
+	u32	block_rd : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_dest_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s {
+	u32	cmdq_empty : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s {
+	u32	client_0_min_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_min_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_min_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_min_depth : 4;
+	u32	client_4_min_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s {
+	u32	client_0_max_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_max_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_max_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_max_depth : 4;
+	u32	client_4_max_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 6;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s {
+	u32	fifo_count : 6;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 4;
+	u32	reserved0 : 1;
+	u32	rd_req : 1;
+	u32	reserved1 : 24;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 10;
+	u32	reserved0 : 22;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_en_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s {
+	u32	all_cli_mux_concat : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s {
+	u32	src_group_sel : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s {
+	u32	ipa_gsi_fifo_status_port_sel : 5;
+	u32	ipa_gsi_fifo_status_en : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u {
+	struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s	def;
+	u32						value;
+};
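+/*
+ * Editor's sketch (not part of the original snapshot): the TLV FIFO
+ * status word carries both pointers and summary flags; an occupancy
+ * check can read the flags without touching the pointer fields.
+ * Hypothetical helper:
+ */
+static inline u32 ipa_hwio_gsi_tlv_fifo_has_room(u32 raw)
+{
+	union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u sts;
+
+	sts.value = raw;			/* raw FIFO status word */
+	return !sts.def.fifo_full &&
+		!sts.def.fifo_almost_full;	/* room for another entry */
+}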
+struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_aos_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	cmdq_userdata : 5;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_spare_reg_1_s {
+	u32	spare_bit0 : 1;
+	u32	spare_bit1 : 1;
+	u32	genqmb_aooowr : 1;
+	u32	spare_bit3 : 1;
+	u32	spare_bit4 : 1;
+	u32	acl_inorder_multi_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_each_cmd_disable : 1;
+	u32	spare_bit8 : 1;
+	u32	acl_dispatcher_frag_notif_check_notif_mid_disable : 1;
+	u32	acl_dispatcher_pkt_check_disable : 1;
+	u32	tx_gives_sspnd_ack_on_open_aggr_frame : 1;
+	u32	spare_bit12 : 1;
+	u32	tx_block_aggr_query_on_holb_packet : 1;
+	u32	frag_mngr_fairness_eviction_on_constructing : 1;
+	u32	rx_cmdq_splitter_cmdq_pending_mux_disable : 1;
+	u32	qmb_ram_rd_cache_disable : 1;
+	u32	rx_stall_on_mbim_deaggr_error : 1;
+	u32	rx_stall_on_gen_deaggr_error : 1;
+	u32	spare_bit19 : 1;
+	u32	revert_warb_fix : 1;
+	u32	gsi_if_out_of_buf_stop_reset_mask_enable : 1;
+	u32	bam_idle_in_ipa_misc_cgc_en : 1;
+	u32	spare_bit23 : 1;
+	u32	spare_bit24 : 1;
+	u32	spare_bit25 : 1;
+	u32	ram_slaveway_access_protection_disable : 1;
+	u32	dcph_ram_rd_prefetch_disable : 1;
+	u32	warb_force_arb_round_finish_special_disable : 1;
+	u32	spare_ackinj_pipe8_mask_enable : 1;
+	u32	spare_bit30 : 1;
+	u32	spare_bit31 : 1;
+};
+union ipa_hwio_def_ipa_spare_reg_1_u {
+	struct ipa_hwio_def_ipa_spare_reg_1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_spare_reg_2_s {
+	u32	tx_bresp_inj_with_flop : 1;
+	u32	cmdq_split_not_wait_data_desc_prior_hdr_push : 1;
+	u32	spare_bits : 30;
+};
+union ipa_hwio_def_ipa_spare_reg_2_u {
+	struct ipa_hwio_def_ipa_spare_reg_2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s {
+	u32	reserved0 : 16;
+	u32	endp_en : 1;
+	u32	reserved1 : 14;
+	u32	init_endp : 1;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg1_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ctxh_ctrl_s {
+	u32	ctxh_lock_id : 4;
+	u32	reserved0 : 27;
+	u32	ctxh_lock : 1;
+};
+union ipa_hwio_def_ipa_ctxh_ctrl_u {
+	struct ipa_hwio_def_ipa_ctxh_ctrl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_stts_ee_n_s {
+	u32	bad_snoc_access_irq : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0 : 1;
+	u32	uc_irq_1 : 1;
+	u32	uc_irq_2 : 1;
+	u32	uc_irq_3 : 1;
+	u32	uc_in_q_not_empty_irq : 1;
+	u32	uc_rx_cmd_q_not_full_irq : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq : 1;
+	u32	rx_err_irq : 1;
+	u32	deaggr_err_irq : 1;
+	u32	tx_err_irq : 1;
+	u32	step_mode_irq : 1;
+	u32	proc_err_irq : 1;
+	u32	tx_suspend_irq : 1;
+	u32	tx_holb_drop_irq : 1;
+	u32	bam_gsi_idle_irq : 1;
+	u32	pipe_yellow_marker_below_irq : 1;
+	u32	pipe_red_marker_below_irq : 1;
+	u32	pipe_yellow_marker_above_irq : 1;
+	u32	pipe_red_marker_above_irq : 1;
+	u32	ucp_irq : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq : 1;
+	u32	gsi_uc_irq : 1;
+	u32	tlv_len_min_dsm_irq : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_stts_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_en_ee_n_s {
+	u32	bad_snoc_access_irq_en : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0_irq_en : 1;
+	u32	uc_irq_1_irq_en : 1;
+	u32	uc_irq_2_irq_en : 1;
+	u32	uc_irq_3_irq_en : 1;
+	u32	uc_in_q_not_empty_irq_en : 1;
+	u32	uc_rx_cmd_q_not_full_irq_en : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq_en : 1;
+	u32	rx_err_irq_en : 1;
+	u32	deaggr_err_irq_en : 1;
+	u32	tx_err_irq_en : 1;
+	u32	step_mode_irq_en : 1;
+	u32	proc_err_irq_en : 1;
+	u32	tx_suspend_irq_en : 1;
+	u32	tx_holb_drop_irq_en : 1;
+	u32	bam_gsi_idle_irq_en : 1;
+	u32	pipe_yellow_marker_below_irq_en : 1;
+	u32	pipe_red_marker_below_irq_en : 1;
+	u32	pipe_yellow_marker_above_irq_en : 1;
+	u32	pipe_red_marker_above_irq_en : 1;
+	u32	ucp_irq_en : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq_en : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq_en : 1;
+	u32	gsi_uc_irq_en : 1;
+	u32	tlv_len_min_dsm_irq_en : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s	def;
+	u32					value;
+};
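+/*
+ * Editor's sketch (not part of the original snapshot): a minimal
+ * IRQ_EN value enabling only the error-class interrupts, composed
+ * through the union so the bit positions stay tied to the layout
+ * above.  Hypothetical helper:
+ */
+static inline u32 ipa_hwio_irq_en_errors_only(void)
+{
+	union ipa_hwio_def_ipa_irq_en_ee_n_u en = { 0 };
+
+	en.def.rx_err_irq_en = 1;
+	en.def.deaggr_err_irq_en = 1;
+	en.def.tx_err_irq_en = 1;
+	en.def.proc_err_irq_en = 1;
+	return en.value;	/* value for IPA_IRQ_EN_EE_n */
+}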
+struct ipa_hwio_def_ipa_snoc_fec_ee_n_s {
+	u32	client : 8;
+	u32	qmb_index : 1;
+	u32	reserved0 : 3;
+	u32	tid : 4;
+	u32	reserved1 : 15;
+	u32	read_not_write : 1;
+};
+union ipa_hwio_def_ipa_snoc_fec_ee_n_u {
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_addr_ee_n_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_fec_addr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_attr_ee_n_s {
+	u32	opcode : 6;
+	u32	error_info : 26;
+};
+union ipa_hwio_def_ipa_fec_attr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s {
+	u32	reserved0 : 13;
+	u32	endpoints : 18;
+	u32	reserved1 : 1;
+};
+union ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	reserved0 : 15;
+};
+union ipa_hwio_def_ipa_log_buf_status_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s {
+	u32	read_ptr : 16;
+	u32	write_ptr : 16;
+};
+union ipa_hwio_def_ipa_log_buf_status_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s {
+	u32 addr_msb : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_local_addr_s {
+	u32	addr : 18;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_uc_qmb_local_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_length_s {
+	u32	length : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_uc_qmb_length_u {
+	struct ipa_hwio_def_ipa_uc_qmb_length_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_trigger_s {
+	u32	direction : 1;
+	u32	reserved0 : 3;
+	u32	posting : 2;
+	u32	reserved1 : 26;
+};
+union ipa_hwio_def_ipa_uc_qmb_trigger_u {
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error_bus : 1;
+	u32	reserved1 : 3;
+	u32	error_max_os : 1;
+	u32	reserved2 : 3;
+	u32	error_max_comp : 1;
+	u32	reserved3 : 3;
+	u32	error_security : 1;
+	u32	reserved4 : 11;
+};
+union ipa_hwio_def_ipa_uc_qmb_pending_tid_u {
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_misc_s {
+	u32	user : 10;
+	u32	reserved0 : 2;
+	u32	rd_priority : 2;
+	u32	reserved1 : 2;
+	u32	wr_priority : 2;
+	u32	reserved2 : 2;
+	u32	ooord : 1;
+	u32	reserved3 : 3;
+	u32	ooowr : 1;
+	u32	reserved4 : 3;
+	u32	swap : 1;
+	u32	irq_coal : 1;
+	u32	posted_stall : 1;
+	u32	qmb_hready_bcr : 1;
+};
+union ipa_hwio_def_ipa_uc_qmb_misc_u {
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_status_s {
+	u32	max_outstanding_rd : 4;
+	u32	outstanding_rd_cnt : 4;
+	u32	completed_rd_cnt : 4;
+	u32	completed_rd_fifo_full : 1;
+	u32	reserved0 : 3;
+	u32	max_outstanding_wr : 4;
+	u32	outstanding_wr_cnt : 4;
+	u32	completed_wr_cnt : 4;
+	u32	completed_wr_fifo_full : 1;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_uc_qmb_status_u {
+	struct ipa_hwio_def_ipa_uc_qmb_status_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s {
+	u32	memtype : 3;
+	u32	reserved0 : 1;
+	u32	noallocate : 1;
+	u32	reserved1 : 3;
+	u32	innershared : 1;
+	u32	reserved2 : 3;
+	u32	shared : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_bus_attrib_u {
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s	def;
+	u32						value;
+};
+#endif

+ 183 - 0
ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h

@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_PKT_CNTXT_H_)
+#define _IPA_PKT_CNTXT_H_
+
+#define IPA_HW_PKT_CTNTX_MAX        0x10
+#define IPA_HW_NUM_SAVE_PKT_CTNTX   0x8
+#define IPA_HW_PKT_CTNTX_START_ADDR 0xE434CA00
+#define IPA_HW_PKT_CTNTX_SIZE       (sizeof(ipa_pkt_ctntx_opcode_state_s) + \
+				     sizeof(ipa_pkt_ctntx_u))
+
+/*
+ * Packet Context States
+ */
+enum ipa_hw_pkt_cntxt_state_e {
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT = 1,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR_REP,
+	IPA_HW_PKT_CNTXT_STATE_H_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_PKT_PARSER,
+	IPA_HW_PKT_CNTXT_STATE_FILTER_NAT,
+	IPA_HW_PKT_CNTXT_STATE_ROUTER,
+	IPA_HW_PKT_CNTXT_STATE_HDRI,
+	IPA_HW_PKT_CNTXT_STATE_UCP,
+	IPA_HW_PKT_CNTXT_STATE_ENQUEUER,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER,
+	IPA_HW_PKT_CNTXT_STATE_D_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_DISPATCHER,
+	IPA_HW_PKT_CNTXT_STATE_TX,
+	IPA_HW_PKT_CNTXT_STATE_TX_ZLT,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_DCMP,
+};
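+
+/*
+ * These states are read back at crash time by ipa_save_registers()
+ * (see ipa_reg_dump.c): for each occupied packet context, the
+ * context's 'state' field is recorded into pkt_cntxt_state[].
+ */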
+
+/*
+ * Packet Context fields as received from VI/Design
+ */
+struct ipa_pkt_ctntx_s {
+	u64	opcode                           : 8;
+	u64	state                            : 5;
+	u64	not_used_1                       : 2;
+	u64	tx_pkt_dma_done                  : 1;
+	u64	exc_deagg                        : 1;
+	u64	exc_pkt_version                  : 1;
+	u64	exc_pkt_len                      : 1;
+	u64	exc_threshold                    : 1;
+	u64	exc_sw                           : 1;
+	u64	exc_nat                          : 1;
+	u64	exc_frag_miss                    : 1;
+	u64	filter_bypass                    : 1;
+	u64	router_bypass                    : 1;
+	u64	nat_bypass                       : 1;
+	u64	hdri_bypass                      : 1;
+	u64	dcph_bypass                      : 1;
+	u64	security_credentials_select      : 1;
+	u64	pkt_2nd_pass                     : 1;
+	u64	xlat_bypass                      : 1;
+	u64	dcph_valid                       : 1;
+	u64	ucp_on                           : 1;
+	u64	replication                      : 1;
+	u64	src_status_en                    : 1;
+	u64	dest_status_en                   : 1;
+	u64	frag_status_en                   : 1;
+	u64	eot_dest                         : 1;
+	u64	eot_notif                        : 1;
+	u64	prev_eot_dest                    : 1;
+	u64	src_hdr_len                      : 8;
+	u64	tx_valid_sectors                 : 8;
+	u64	rx_flags                         : 8;
+	u64	rx_packet_length                 : 16;
+	u64	revised_packet_length            : 16;
+	u64	frag_en                          : 1;
+	u64	frag_bypass                      : 1;
+	u64	frag_process                     : 1;
+	u64	notif_pipe                       : 5;
+	u64	src_id                           : 8;
+	u64	tx_pkt_transferred               : 1;
+	u64	src_pipe                         : 5;
+	u64	dest_pipe                        : 5;
+	u64	frag_pipe                        : 5;
+	u64	ihl_offset                       : 8;
+	u64	protocol                         : 8;
+	u64	tos                              : 8;
+	u64	id                               : 16;
+	u64	v6_reserved                      : 4;
+	u64	ff                               : 1;
+	u64	mf                               : 1;
+	u64	pkt_israg                        : 1;
+	u64	tx_holb_timer_overflow           : 1;
+	u64	tx_holb_timer_running            : 1;
+	u64	trnseq_0                         : 3;
+	u64	trnseq_1                         : 3;
+	u64	trnseq_2                         : 3;
+	u64	trnseq_3                         : 3;
+	u64	trnseq_4                         : 3;
+	u64	trnseq_ex_length                 : 8;
+	u64	trnseq_4_length                  : 8;
+	u64	trnseq_4_offset                  : 8;
+	u64	dps_tx_pop_cnt                   : 2;
+	u64	dps_tx_push_cnt                  : 2;
+	u64	vol_ic_dcph_cfg                  : 1;
+	u64	vol_ic_tag_stts                  : 1;
+	u64	vol_ic_pxkt_init_e               : 1;
+	u64	vol_ic_pkt_init                  : 1;
+	u64	tx_holb_counter                  : 32;
+	u64	trnseq_0_length                  : 8;
+	u64	trnseq_0_offset                  : 8;
+	u64	trnseq_1_length                  : 8;
+	u64	trnseq_1_offset                  : 8;
+	u64	trnseq_2_length                  : 8;
+	u64	trnseq_2_offset                  : 8;
+	u64	trnseq_3_length                  : 8;
+	u64	trnseq_3_offset                  : 8;
+	u64	dmar_valid_length                : 16;
+	u64	dcph_valid_length                : 16;
+	u64	frag_hdr_offset                  : 9;
+	u64	ip_payload_offset                : 9;
+	u64	frag_rule                        : 4;
+	u64	frag_table                       : 1;
+	u64	frag_hit                         : 1;
+	u64	data_cmdq_ptr                    : 8;
+	u64	filter_result                    : 6;
+	u64	router_result                    : 6;
+	u64	nat_result                       : 6;
+	u64	hdri_result                      : 6;
+	u64	dcph_result                      : 6;
+	u64	dcph_result_valid                : 1;
+	u64	not_used_2                       : 4;
+	u64	tx_pkt_suspended                 : 1;
+	u64	tx_pkt_dropped                   : 1;
+	u64	not_used_3                       : 3;
+	u64	metadata_valid                   : 1;
+	u64	metadata_type                    : 4;
+	u64	ul_cs_start_diff                 : 9;
+	u64	cs_disable_trlr_vld_bit          : 1;
+	u64	cs_required                      : 1;
+	u64	dest_hdr_len                     : 8;
+	u64	fr_l                             : 1;
+	u64	fl_h                             : 1;
+	u64	fr_g                             : 1;
+	u64	fr_ret                           : 1;
+	u64	fr_rule_id                       : 10;
+	u64	rt_l                             : 1;
+	u64	rt_h                             : 1;
+	u64	rtng_tbl_index                   : 5;
+	u64	rt_match                         : 1;
+	u64	rt_rule_id                       : 10;
+	u64	nat_tbl_index                    : 13;
+	u64	nat_type                         : 2;
+	u64	hdr_l                            : 1;
+	u64	header_offset                    : 10;
+	u64	not_used_4                       : 1;
+	u64	filter_result_valid              : 1;
+	u64	router_result_valid              : 1;
+	u64	nat_result_valid                 : 1;
+	u64	hdri_result_valid                : 1;
+	u64	not_used_5                       : 1;
+	u64	stream_id                        : 8;
+	u64	not_used_6                       : 6;
+	u64	dcph_context_index               : 2;
+	u64	dcph_cfg_size                    : 16;
+	u64	dcph_cfg_count                   : 32;
+	u64	tag_info                         : 48;
+	u64	ucp_cmd_id                       : 16;
+	u64	metadata                         : 32;
+	u64	ucp_cmd_params                   : 32;
+	u64	nat_ip_address                   : 32;
+	u64	nat_ip_cs_diff                   : 16;
+	u64	frag_dest_pipe                   : 5;
+	u64	frag_nat_type                    : 2;
+	u64	fragr_ret                        : 1;
+	u64	frag_protocol                    : 8;
+	u64	src_ip_address                   : 32;
+	u64	dest_ip_address                  : 32;
+	u64	not_used_7                       : 37;
+	u64	frag_hdr_l                       : 1;
+	u64	frag_header_offset               : 10;
+	u64	frag_id                          : 16;
+} __packed;
+
+#endif /* #if !defined(_IPA_PKT_CNTXT_H_) */

+ 1632 - 0
ipa/ipa_v3/dump/ipa_reg_dump.c

@@ -0,0 +1,1632 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#include "ipa_reg_dump.h"
+#include "ipa_access_control.h"
+
+/* Total size required for test bus */
+#define IPA_MEM_OVERLAY_SIZE     0x66000
+
+/*
+ * The following structure contains a hierarchy of structures that
+ * ultimately leads to a series of leaves. The leaves are structures
+ * containing detailed, bit-level register definitions.
+ */
+static struct regs_save_hierarchy_s ipa_reg_save;
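+
+/*
+ * Illustrative access path (assuming the hierarchy laid out in
+ * ipa_reg_dump.h): a leaf such as ipa_reg_save.ipa.gen.ipa_state
+ * exposes both the raw register word ('value') and its decoded bit
+ * fields ('def') via the *_u unions in the hwio headers.
+ */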
+
+static unsigned int ipa_testbus_mem[IPA_MEM_OVERLAY_SIZE];
+
+/*
+ * The following data structure contains a list of the registers
+ * (whose data are to be copied) and the locations (within
+ * ipa_reg_save above) into which the registers' values need to be
+ * copied.
+ */
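+/*
+ * Sketch of what each entry pairs up (the macro expansions live in
+ * ipa_reg_dump.h, so the names below are conceptual): a source
+ * register offset and a destination word inside ipa_reg_save, e.g.
+ *
+ *	{ .src_addr = <register offset>,
+ *	  .dst_addr = &ipa_reg_save.ipa.gen.ipa_state.value }
+ *
+ * The save loop then performs *dst_addr = in_dword(src_addr) for
+ * every entry.
+ */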
+static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
+	/*
+	 * =====================================================================
+	 * IPA register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* IPA General Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE,
+			     ipa.gen,
+			     ipa_state),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_RX_ACTIVE,
+			     ipa.gen,
+			     ipa_state_rx_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX_WRAPPER,
+			     ipa.gen,
+			     ipa_state_tx_wrapper),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX0,
+			     ipa.gen,
+			     ipa_state_tx0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX1,
+			     ipa.gen,
+			     ipa_state_tx1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_AGGR_ACTIVE,
+			     ipa.gen,
+			     ipa_state_aggr_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DFETCHER,
+			     ipa.gen,
+			     ipa_state_dfetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_0,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_1,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_AOS,
+			     ipa.gen,
+			     ipa_state_gsi_aos),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_IF,
+			     ipa.gen,
+			     ipa_state_gsi_if),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_SKIP,
+			     ipa.gen,
+			     ipa_state_gsi_skip),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_TLV,
+			     ipa.gen,
+			     ipa_state_gsi_tlv),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_LSB,
+			     ipa.gen,
+			     ipa_dpl_timer_lsb),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_MSB,
+			     ipa.gen,
+			     ipa_dpl_timer_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_PROC_IPH_CFG,
+			     ipa.gen,
+			     ipa_proc_iph_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_ROUTE,
+			     ipa.gen,
+			     ipa_route),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_1,
+			     ipa.gen,
+			     ipa_spare_reg_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_2,
+			     ipa.gen,
+			     ipa_spare_reg_2),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG,
+			     ipa.gen,
+			     ipa_log),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_CFG,
+			     ipa.gen,
+			     ipa_log_buf_status_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_status_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_CFG,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DPL_FIFO,
+			     ipa.gen,
+			     ipa_state_dpl_fifo),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_HW_VERSION,
+			     ipa.gen,
+			     ipa_comp_hw_version),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_EN,
+			     ipa.gen,
+			     ipa_filt_rout_hash_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_FLUSH,
+			     ipa.gen,
+			     ipa_filt_rout_hash_flush),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER,
+			     ipa.gen,
+			     ipa_state_fetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_BAM_ACTIVATED_PORTS,
+			     ipa.gen,
+			     ipa_bam_activated_ports),
+	GEN_SRC_DST_ADDR_MAP(IPA_TX_COMMANDER_CMDQ_STATUS,
+			     ipa.gen,
+			     ipa_tx_commander_cmdq_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_EN,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_wr_n_rd_sel),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_cli_mux),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_ACL,
+			     ipa.gen,
+			     ipa_state_acl),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE_MSB,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_local_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_RSRC_GRP_CFG,
+			     ipa.gen,
+			     ipa_rsrc_grp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_PIPELINE_DISABLE,
+			     ipa.gen,
+			     ipa_pipeline_disable),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_CFG,
+			     ipa.gen,
+			     ipa_comp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_NLO_AGGR,
+			     ipa.gen,
+			     ipa_state_nlo_aggr),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG1,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg1),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG2,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg2),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_ACK_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_ack_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_DATA_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_data_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_MIN_DSM_CFG,
+			     ipa.gen,
+			     ipa_nlo_min_dsm_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_REQ,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_req),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_COOKIE,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_cookie),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_ACK,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_ack),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_DSM_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_dsm_open),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_QBAP_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_qbap_open),
+
+	/* Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_DEBUG_DATA,
+			     ipa.dbg,
+			     ipa_debug_data),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_BREAKPOINTS,
+			     ipa.dbg,
+			     ipa_step_mode_breakpoints),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_STATUS,
+			     ipa.dbg,
+			     ipa_step_mode_status),
+
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CMD_n, ipa_rx_splt_cmdq_cmd_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CFG_n, ipa_rx_splt_cmdq_cfg_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_0_n, ipa_rx_splt_cmdq_data_wr_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_1_n, ipa_rx_splt_cmdq_data_wr_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_2_n, ipa_rx_splt_cmdq_data_wr_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_3_n, ipa_rx_splt_cmdq_data_wr_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_0_n, ipa_rx_splt_cmdq_data_rd_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_1_n, ipa_rx_splt_cmdq_data_rd_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_2_n, ipa_rx_splt_cmdq_data_rd_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_3_n, ipa_rx_splt_cmdq_data_rd_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_STATUS_n, ipa_rx_splt_cmdq_status_n),
+
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_WR,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_cfg_wr),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_RD,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_cfg_rd),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_min_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_max_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_status_empty),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_STTS_EE_n,
+				      ipa_irq_stts_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_EN_EE_n,
+				      ipa_irq_en_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ADDR_EE_n,
+				      ipa_fec_addr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ATTR_EE_n,
+				      ipa_fec_attr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SNOC_FEC_EE_n,
+				      ipa_snoc_fec_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_HOLB_DROP_IRQ_INFO_EE_n,
+				      ipa_holb_drop_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_INFO_EE_n,
+				      ipa_suspend_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_EN_EE_n,
+				      ipa_suspend_irq_en_ee_n),
+
+	/* Pipe Endp Registers */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n,
+					 ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_SCND_n,
+					 ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CFG_n,
+					 ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_NAT_n,
+					 ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_n,
+					 ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_EXT_n,
+					 ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+					 ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_n,
+					 ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_MODE_n,
+					 ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_AGGR_n,
+					 ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					 ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					 ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_DEAGGR_n,
+					 ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_STATUS_n,
+					 ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_RSRC_GRP_n,
+					 ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_SEQ_n,
+					 ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_TLV_n,
+					 ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_AOS_n,
+					 ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG1_n,
+					 ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+					 ipa_endp_filter_router_hsh_cfg_n),
+
+	/* Source Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_23_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_45_rsrc_type_n),
+
+	/* Destination Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_23_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_45_rsrc_type_n),
+
+	/* Source Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(
+		IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_src_rsrc_grp_0123_rsrc_type_cnt_n),
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(
+		IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n,
+		ipa_src_rsrc_grp_4567_rsrc_type_cnt_n),
+
+	/* Destination Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(
+		IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n),
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(
+		IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n,
+		ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n),
+
+	/*
+	 * =====================================================================
+	 * GSI register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* GSI General Registers */
+	GEN_SRC_DST_ADDR_MAP(GSI_CFG,
+			     gsi.gen,
+			     gsi_cfg),
+	GEN_SRC_DST_ADDR_MAP(GSI_REE_CFG,
+			     gsi.gen,
+			     gsi_ree_cfg),
+	IPA_REG_SAVE_GSI_VER(
+			     IPA_GSI_TOP_GSI_INST_RAM_n,
+			     ipa_gsi_top_gsi_inst_ram_n),
+
+	/* GSI Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_BUSY_REG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_busy_reg),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_event_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_timer_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_rd_wr_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_from_sw),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_SW_STALL,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_sw_stall),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_for_debug),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_qsb_log_err_trns_id),
+
+	IPA_REG_SAVE_CFG_ENTRY_GSI_QSB_DEBUG(
+		GSI_DEBUG_QSB_LOG_LAST_MISC_IDn, qsb_log_last_misc),
+
+	/* GSI IRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ev_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_new_re),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_dis_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_event_gen_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_timer_expired),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_write_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_read_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_uc_gp_int),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_int_mod_stopped),
+
+	/* GSI SHRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+
+	/* GSI General EE Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(GSI_MANAGER_EE_QOS_n,
+					      gsi_manager_ee_qos_n),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_GSI_STATUS,
+					      ee_n_gsi_status),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ,
+					      ee_n_cntxt_type_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ_MSK,
+					      ee_n_cntxt_type_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ,
+					      ee_n_cntxt_src_gsi_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ,
+					      ee_n_cntxt_src_ev_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK,
+					      ee_n_cntxt_src_gsi_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ_MSK,
+					      ee_n_cntxt_src_ev_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ,
+					      ee_n_cntxt_src_ieob_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ_MSK,
+					      ee_n_cntxt_src_ieob_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GSI_IRQ_STTS,
+					      ee_n_cntxt_gsi_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GLOB_IRQ_STTS,
+					      ee_n_cntxt_glob_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_ERROR_LOG,
+					      ee_n_error_log),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_0,
+					      ee_n_cntxt_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_1,
+					      ee_n_cntxt_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_INTSET,
+					      ee_n_cntxt_intset),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_LSB,
+					      ee_n_cntxt_msi_base_lsb),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_MSB,
+					      ee_n_cntxt_msi_base_msb),
+
+	/* GSI Channel Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_0,
+					    ee_n_gsi_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_1,
+					    ee_n_gsi_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_2,
+					    ee_n_gsi_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_3,
+					    ee_n_gsi_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_4,
+					    ee_n_gsi_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_5,
+					    ee_n_gsi_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_6,
+					    ee_n_gsi_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_7,
+					    ee_n_gsi_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
+					    ee_n_gsi_ch_k_re_fetch_read_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
+					    ee_n_gsi_ch_k_re_fetch_write_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_QOS,
+					    ee_n_gsi_ch_k_qos),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_0,
+					    ee_n_gsi_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_1,
+					    ee_n_gsi_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_2,
+					    ee_n_gsi_ch_k_scratch_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_3,
+					    ee_n_gsi_ch_k_scratch_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(GSI_MAP_EE_n_CH_k_VP_TABLE,
+					    gsi_map_ee_n_ch_k_vp_table),
+
+	/* GSI Channel Event Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_0,
+					     ee_n_ev_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_1,
+					     ee_n_ev_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_2,
+					     ee_n_ev_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_3,
+					     ee_n_ev_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_4,
+					     ee_n_ev_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_5,
+					     ee_n_ev_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_6,
+					     ee_n_ev_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_7,
+					     ee_n_ev_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_8,
+					     ee_n_ev_ch_k_cntxt_8),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_9,
+					     ee_n_ev_ch_k_cntxt_9),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_10,
+					     ee_n_ev_ch_k_cntxt_10),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_11,
+					     ee_n_ev_ch_k_cntxt_11),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_12,
+					     ee_n_ev_ch_k_cntxt_12),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_13,
+					     ee_n_ev_ch_k_cntxt_13),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_0,
+					     ee_n_ev_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_1,
+					     ee_n_ev_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(GSI_DEBUG_EE_n_EV_k_VP_TABLE,
+					     gsi_debug_ee_n_ev_k_vp_table),
+
+#if defined(CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS) && \
+	CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS > 0
+	/* Endp Registers for remaining pipes */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_n,
+					       ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_SCND_n,
+					       ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CFG_n,
+					       ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_NAT_n,
+					       ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_n,
+					       ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_EXT_n,
+					       ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_METADATA_n,
+					       ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_MODE_n,
+					       ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_AGGR_n,
+					       ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					       ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					       ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_DEAGGR_n,
+					       ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_STATUS_n,
+					       ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_RSRC_GRP_n,
+					       ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_SEQ_n,
+					       ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_TLV_n,
+					       ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_AOS_n,
+					       ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG1_n,
+					       ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		 ipa_endp_filter_router_hsh_cfg_n),
+#endif
+};
+
+/* Save-config array for the IPA uC PER registers */
+static struct map_src_dst_addr_s ipa_uc_regs_to_save_array[] = {
+	/* HWP registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_SYS_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_sys_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LOCAL_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_local_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LENGTH,
+			     ipa.hwp,
+			     ipa_uc_qmb_length),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_TRIGGER,
+			     ipa.hwp,
+			     ipa_uc_qmb_trigger),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_PENDING_TID,
+			     ipa.hwp,
+			     ipa_uc_qmb_pending_tid),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_rd_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_wr_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_MISC,
+			     ipa.hwp,
+			     ipa_uc_qmb_misc),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_STATUS,
+			     ipa.hwp,
+			     ipa_uc_qmb_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_BUS_ATTRIB,
+			     ipa.hwp,
+			     ipa_uc_qmb_bus_attrib),
+};
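+
+/*
+ * Note: the uC PER registers above are only collected when
+ * ipa3_ctx->do_non_tn_collection_on_crash is set (see
+ * ipa_save_registers() below); on targets that support SSR this
+ * collection is normally left disabled, since it may stall IPA after
+ * the SSR.
+ */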
+
+static void ipa_hal_save_regs_save_ipa_testbus(void);
+static void ipa_reg_save_gsi_fifo_status(void);
+static void ipa_reg_save_rsrc_cnts(void);
+static void ipa_hal_save_regs_ipa_cmdq(void);
+static void ipa_hal_save_regs_rsrc_db(void);
+static void ipa_reg_save_anomaly_check(void);
+
+static struct reg_access_funcs_s *get_access_funcs(u32 addr)
+{
+	u32 i, asub = ipa3_ctx->sd_state;
+
+	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
+		if (addr >= mem_access_map[i].addr_range_begin &&
+			addr <  mem_access_map[i].addr_range_end) {
+			return mem_access_map[i].access[asub];
+		}
+	}
+
+	IPAERR("Unknown register offset(0x%08X). Using dflt access methods\n",
+		   addr);
+
+	return &io_matrix[AA_COMBO];
+}
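+
+/*
+ * get_access_funcs() keys its lookup on ipa3_ctx->sd_state (presumably
+ * the device's secure/debug state), so the same register offset can
+ * resolve to different read/write methods depending on that state;
+ * unknown offsets fall back to the default AA_COMBO access methods.
+ */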
+
+static u32 in_dword(
+	u32 addr)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	return io->read(ipa3_ctx->reg_collection_base + addr);
+}
+
+static u32 in_dword_masked(
+	u32 addr,
+	u32 mask)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+	u32 val;
+
+	val = io->read(ipa3_ctx->reg_collection_base + addr);
+
+	if (io->read == act_read)
+		return val & mask;
+
+	return val;
+}
+
+static void out_dword(
+	u32 addr,
+	u32 val)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	io->write(ipa3_ctx->reg_collection_base + addr, val);
+}
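+
+/*
+ * Example (illustrative): ipa_save_registers() below drives every
+ * register copy through these accessors,
+ *
+ *	*(ipa_regs_to_save_array[i].dst_addr) =
+ *		in_dword(ipa_regs_to_save_array[i].src_addr);
+ *
+ * so each access is first routed through the access-control map,
+ * allowing registers that are unreadable in the current sd_state to
+ * be serviced by substitute read/write methods rather than faulting.
+ */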
+
+/*
+ * FUNCTION:  ipa_save_gsi_ver
+ *
+ * Saves the GSI version
+ *
+ * @return
+ * None
+ */
+void ipa_save_gsi_ver(void)
+{
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
+	ipa_reg_save.gsi.fw_ver =
+		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0);
+}
+
+/*
+ * FUNCTION:  ipa_save_registers
+ *
+ * Saves the values of all IPA registers configured for collection
+ *
+ * @return
+ * None
+ */
+void ipa_save_registers(void)
+{
+	u32 i = 0;
+	/* Fetch the number of registers configured to be saved */
+	u32 num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+	u32 num_uc_per_regs = ARRAY_SIZE(ipa_uc_regs_to_save_array);
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u for_cfg;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u for_read;
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
+	IPAERR("Commencing\n");
+
+	/*
+	 * Exclude the GSI FIFO and the endp registers for the extra
+	 * pipes for now; these will be saved later.
+	 */
+	num_regs -= (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	memset(&for_cfg, 0, sizeof(for_cfg));
+	memset(&for_read, 0, sizeof(for_read));
+
+	/* Now save all the configured registers */
+	for (i = 0; i < num_regs; i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[i].src_addr);
+	}
+
+	/*
+	 * Set the active flag for all active pipe-indexed registers.
+	 */
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++)
+		ipa_reg_save.ipa.pipes[i].active = true;
+
+	/* Now save the per endp registers for the remaining pipes */
+	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS); i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[num_regs + i].src_addr);
+	}
+
+	IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE();
+
+	num_regs += (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	/* Saving GSI FIFO Status registers */
+	ipa_reg_save_gsi_fifo_status();
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		/* Save all the uC PER configured registers */
+		for (i = 0; i < num_uc_per_regs; i++) {
+			/* Copy reg value to our data struct */
+			*(ipa_uc_regs_to_save_array[i].dst_addr) =
+			    in_dword(ipa_uc_regs_to_save_array[i].src_addr);
+		}
+
+		/* Saving CMD Queue registers */
+		ipa_hal_save_regs_ipa_cmdq();
+
+		/* Collecting resource DB information */
+		ipa_hal_save_regs_rsrc_db();
+
+		/* Save IPA testbus */
+		if (ipa3_ctx->do_testbus_collection_on_crash)
+			ipa_hal_save_regs_save_ipa_testbus();
+	}
+
+	/* GSI test bus */
+	for (i = 0;
+	     i < ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array);
+	     i++) {
+		ipa_reg_save.gsi.debug.gsi_test_bus.test_bus_selector[i] =
+			ipa_reg_save_gsi_ch_test_bus_selector_array[i];
+
+		/* Write test bus selector */
+		IPA_WRITE_SCALER_REG(
+			GSI_TEST_BUS_SEL,
+			ipa_reg_save_gsi_ch_test_bus_selector_array[i]);
+
+		ipa_reg_save.gsi.debug.gsi_test_bus.test_bus_reg[
+		    i].gsi_testbus_reg =
+		    (u32) IPA_READ_SCALER_REG(GSI_TEST_BUS_REG);
+	}
+
+	ipa_reg_save_rsrc_cnts();
+
+	for (i = 0; i < HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_mcs_regs.mcs_reg[i].rf_reg =
+			IPA_READ_1xVECTOR_REG(GSI_DEBUG_SW_RF_n_READ, i);
+
+	for (i = 0; i < HWIO_GSI_DEBUG_COUNTERn_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_cnt_regs.cnt[i].counter_value =
+			(u16)IPA_READ_1xVECTOR_REG(GSI_DEBUG_COUNTERn, i);
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.a7[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.a7[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
+	}
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.uc[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
+	}
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		u32 ofst = GEN_2xVECTOR_REG_OFST(IPA_CTX_ID_m_CTX_NUM_n, 0, 0);
+		struct reg_access_funcs_s *io = get_access_funcs(ofst);
+		/*
+		 * If the memory is accessible, copy pkt context directly from
+		 * IPA_CTX_ID register space
+		 */
+		if (io->read == act_read) {
+			memcpy((void *)ipa_reg_save.pkt_ctntx,
+				   (const void *)
+				   (ipa3_ctx->reg_collection_base + ofst),
+				   sizeof(ipa_reg_save.pkt_ctntx));
+
+			for_cfg.value =
+				IPA_READ_SCALER_REG(IPA_RSRC_MNGR_DB_CFG);
+
+			for_cfg.def.rsrc_type_sel = 0;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_RSRC_MNGR_DB_CFG,
+				for_cfg.value);
+
+			for (i = 0; i < IPA_HW_PKT_CTNTX_MAX; i++) {
+				for_cfg.def.rsrc_id_sel = i;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_RSRC_MNGR_DB_CFG,
+					for_cfg.value);
+
+				for_read.value =
+					IPA_READ_SCALER_REG(
+						IPA_RSRC_MNGR_DB_RSRC_READ);
+
+				if (for_read.def.rsrc_occupied) {
+					ipa_reg_save.pkt_ctntx_active[i] = true;
+					ipa_reg_save.pkt_cntxt_state[i] =
+						(enum ipa_hw_pkt_cntxt_state_e)
+						ipa_reg_save.pkt_ctntx[i].state;
+				}
+			}
+		} else {
+			IPAERR("IPA_CTX_ID is not currently accessible\n");
+		}
+	}
+
+	if (ipa3_ctx->do_ram_collection_on_crash) {
+		for (i = 0; i < IPA_IU_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_iu_ptr[i] =
+				in_dword(IPA_IU_ADDR + (i * sizeof(u32)));
+		}
+		for (i = 0; i < IPA_SRAM_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_sram_ptr[i] =
+				in_dword(IPA_SRAM_ADDR + (i * sizeof(u32)));
+		}
+		for (i = 0; i < IPA_MBOX_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_mbox_ptr[i] =
+				in_dword(IPA_MBOX_ADDR + (i * sizeof(u32)));
+		}
+		for (i = 0; i < IPA_HRAM_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_hram_ptr[i] =
+				in_dword(IPA_HRAM_ADDR + (i * sizeof(u32)));
+		}
+		for (i = 0; i < IPA_SEQ_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_seq_ptr[i] =
+				in_dword(IPA_SEQ_ADDR + (i * sizeof(u32)));
+		}
+		for (i = 0; i < IPA_GSI_SIZE / sizeof(u32); i++) {
+			ipa_reg_save.ipa.ipa_gsi_ptr[i] =
+				in_dword(IPA_GSI_ADDR + (i * sizeof(u32)));
+		}
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_iu_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_sram_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_mbox_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_hram_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_seq_ptr);
+		IPALOG_VnP_ADDRS(ipa_reg_save.ipa.ipa_gsi_ptr);
+	}
+
+	ipa_reg_save_anomaly_check();
+
+	IPAERR("Completed\n");
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_gsi_fifo_status
+ *
+ * This function saves the GSI FIFO Status registers for all endpoints
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_reg_save_gsi_fifo_status(void)
+{
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u gsi_fifo_status_ctrl;
+	u8 i;
+
+	memset(&gsi_fifo_status_ctrl, 0, sizeof(gsi_fifo_status_ctrl));
+
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++) {
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_en = 1;
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_port_sel = i;
+
+		IPA_MASKED_WRITE_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL,
+				     gsi_fifo_status_ctrl.value);
+
+		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.value =
+			IPA_READ_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL);
+		ipa_reg_save.gsi_fifo_status[i].gsi_tlv_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_TLV_FIFO_STATUS);
+		ipa_reg_save.gsi_fifo_status[i].gsi_aos_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_AOS_FIFO_STATUS);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups.
+ *
+ * @param
+ * @return
+ */
+static void ipa_reg_save_rsrc_cnts(void)
+{
+	union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u
+		src_0123_rsrc_cnt;
+	union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u
+		dst_0123_rsrc_cnt;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 2);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 3);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 4);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+}
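+
+/*
+ * Resource-type indices used above: src 0 = pkt cntxt,
+ * 1 = descriptor list, 2 = data descriptor buffer, 3 = HPS DMARs,
+ * 4 = reserved ACKs; dst 0 = reserved sectors, 1 = DPS DMARs. The
+ * test-bus variant below numbers the same types 0..6 consecutively.
+ */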
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts_test_bus
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups collected from test bus.
+ *
+ * @param
+ *
+ * @return
+ */
+void ipa_reg_save_rsrc_cnts_test_bus(void)
+{
+	int32_t rsrc_type = 0;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
+	rsrc_type = 0;
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 1;
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 2;
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 3;
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 4;
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 5;
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+
+	rsrc_type = 6;
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_ipa_cmdq
+ *
+ * This function saves the various IPA CMDQ registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_ipa_cmdq(void)
+{
+	int32_t i;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u rx_hps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u hps_dps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u dps_tx_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u
+		prod_ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u ntf_tx_cmdq_cmd = { { 0 } };
+
+	/* Save RX_HPS CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS; i++) {
+		rx_hps_cmdq_cmd.def.rd_req = 0;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_COUNT);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_STATUS);
+		rx_hps_cmdq_cmd.def.rd_req = 1;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_0);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_1_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_1);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_2_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_2);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_3_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_3);
+	}
+
+	/* Save HPS_DPS CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		hps_dps_cmdq_cmd.def.rd_req = 0;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_COUNT);
+
+		hps_dps_cmdq_cmd.def.rd_req = 1;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_DATA_RD_0);
+	}
+
+	/* Save DPS_TX CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 0;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_COUNT);
+
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 1;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_DATA_RD_0);
+	}
+
+	/* Save ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		ackmngr_cmdq_cmd.def.rd_req = 0;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_COUNT);
+
+		ackmngr_cmdq_cmd.def.rd_req = 1;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_data_rd_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save PROD ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		prod_ackmngr_cmdq_cmd.def.rd_req = 0;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_status_arr[i].value
+			= IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_COUNT);
+		prod_ackmngr_cmdq_cmd.def.rd_req = 1;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_data_rd_arr[
+			i].value =
+			IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save NTF_TX CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		ntf_tx_cmdq_cmd.def.rd_req = 0;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_COUNT);
+		ntf_tx_cmdq_cmd.def.rd_req = 1;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_DATA_RD_0);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_save_ipa_testbus
+ *
+ * This function saves the IPA testbus
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_save_ipa_testbus(void)
+{
+	s32 sel_internal, sel_external, sel_ep;
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel = { { 0 } };
+
+	if (ipa_reg_save.ipa.testbus == NULL) {
+		/*
+		 * Test-bus structure not allocated - exit test-bus collection
+		 */
+		IPADBG("ipa_reg_save.ipa.testbus was not allocated\n");
+		return;
+	}
+
+	/* Enable Test-bus */
+	testbus_sel.value = 0;
+	testbus_sel.def.testbus_en = true;
+
+	IPA_WRITE_SCALER_REG(IPA_TESTBUS_SEL, testbus_sel.value);
+
+	for (sel_external = 0;
+		 sel_external <= IPA_TESTBUS_SEL_EXTERNAL_MAX;
+		 sel_external++) {
+
+		for (sel_internal = 0;
+			 sel_internal <= IPA_TESTBUS_SEL_INTERNAL_MAX;
+			 sel_internal++) {
+
+			testbus_sel.value = 0;
+
+			testbus_sel.def.pipe_select = 0;
+			testbus_sel.def.external_block_select =
+				sel_external;
+			testbus_sel.def.internal_block_select =
+				sel_internal;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_TESTBUS_SEL,
+				testbus_sel.value);
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_sel.value =
+				testbus_sel.value;
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_data.value =
+				IPA_READ_SCALER_REG(IPA_DEBUG_DATA);
+		}
+	}
+
+	/* Collect per EP test bus */
+	for (sel_ep = 0;
+		 sel_ep <= IPA_TESTBUS_SEL_EP_MAX;
+		 sel_ep++) {
+
+		for (sel_external = 0;
+			 sel_external <=
+				 IPA_TESTBUS_SEL_EXTERNAL_MAX;
+			 sel_external++) {
+
+			for (sel_internal = 0;
+				 sel_internal <=
+					 IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX;
+				 sel_internal++) {
+
+				testbus_sel.value = 0;
+
+				testbus_sel.def.pipe_select = sel_ep;
+				testbus_sel.def.external_block_select =
+					sel_external;
+				testbus_sel.def.internal_block_select =
+					sel_internal;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_TESTBUS_SEL,
+					testbus_sel.value);
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_sel.value =
+					testbus_sel.value;
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_data.value =
+					IPA_READ_SCALER_REG(
+						IPA_DEBUG_DATA);
+			}
+		}
+	}
+
+	/* Disable Test-bus */
+	testbus_sel.value = 0;
+
+	IPA_WRITE_SCALER_REG(
+		IPA_TESTBUS_SEL,
+		testbus_sel.value);
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_init
+ *
+ * This function initializes and memsets the register save struct.
+ *
+ * @param value - the byte value used to initialize the save buffers
+ *                (passed to memset)
+ *
+ * @return 0 on success, -ENOMEM on ioremap or allocation failure
+ */
+int ipa_reg_save_init(u32 value)
+{
+	u32 i, num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return 0;
+
+	memset(&ipa_reg_save, value, sizeof(ipa_reg_save));
+
+	ipa_reg_save.ipa.testbus = NULL;
+
+	if (ipa3_ctx->do_testbus_collection_on_crash) {
+		memset(ipa_testbus_mem, value, sizeof(ipa_testbus_mem));
+		ipa_reg_save.ipa.testbus =
+		    (struct ipa_reg_save_ipa_testbus_s *) ipa_testbus_mem;
+	}
+
+	/* setup access for register collection/dump on crash */
+	IPADBG("Mapping 0x%x bytes starting at 0x%x\n",
+	       ipa3_ctx->entire_ipa_block_size,
+	       ipa3_ctx->ipa_wrapper_base);
+
+	ipa3_ctx->reg_collection_base =
+		ioremap_nocache(ipa3_ctx->ipa_wrapper_base,
+			ipa3_ctx->entire_ipa_block_size);
+
+	if (!ipa3_ctx->reg_collection_base) {
+		IPAERR(":register collection ioremap err\n");
+		goto alloc_fail1;
+	}
+
+	num_regs -=
+		(CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	for (i = 0;
+		 i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			  IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+		 i++)
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) = 0x0;
+
+	ipa_reg_save.ipa.ipa_gsi_ptr  = NULL;
+	ipa_reg_save.ipa.ipa_seq_ptr  = NULL;
+	ipa_reg_save.ipa.ipa_hram_ptr = NULL;
+	ipa_reg_save.ipa.ipa_mbox_ptr = NULL;
+	ipa_reg_save.ipa.ipa_sram_ptr = NULL;
+	ipa_reg_save.ipa.ipa_iu_ptr   = NULL;
+
+	if (ipa3_ctx->do_ram_collection_on_crash) {
+		ipa_reg_save.ipa.ipa_iu_ptr =
+			alloc_and_init(IPA_IU_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_iu_ptr) {
+			IPAERR("ipa_iu_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+
+		ipa_reg_save.ipa.ipa_sram_ptr =
+			alloc_and_init(IPA_SRAM_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_sram_ptr) {
+			IPAERR("ipa_sram_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+
+		ipa_reg_save.ipa.ipa_mbox_ptr =
+			alloc_and_init(IPA_MBOX_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_mbox_ptr) {
+			IPAERR("ipa_mbox_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+
+		ipa_reg_save.ipa.ipa_hram_ptr =
+			alloc_and_init(IPA_HRAM_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_hram_ptr) {
+			IPAERR("ipa_hram_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+
+		ipa_reg_save.ipa.ipa_seq_ptr =
+			alloc_and_init(IPA_SEQ_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_seq_ptr) {
+			IPAERR("ipa_seq_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+
+		ipa_reg_save.ipa.ipa_gsi_ptr =
+			alloc_and_init(IPA_GSI_SIZE, value);
+		if (!ipa_reg_save.ipa.ipa_gsi_ptr) {
+			IPAERR("ipa_gsi_ptr memory alloc failed\n");
+			goto alloc_fail2;
+		}
+	}
+
+	return 0;
+
+alloc_fail2:
+	kfree(ipa_reg_save.ipa.ipa_seq_ptr);
+	kfree(ipa_reg_save.ipa.ipa_hram_ptr);
+	kfree(ipa_reg_save.ipa.ipa_mbox_ptr);
+	kfree(ipa_reg_save.ipa.ipa_sram_ptr);
+	kfree(ipa_reg_save.ipa.ipa_iu_ptr);
+	iounmap(ipa3_ctx->reg_collection_base);
+alloc_fail1:
+	return -ENOMEM;
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_rsrc_db
+ *
+ * This function saves the various IPA RSRC_MNGR_DB registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_rsrc_db(void)
+{
+	u32 rsrc_type = 0;
+	u32 rsrc_id = 0;
+	u32 rsrc_group = 0;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u
+		ipa_rsrc_mngr_db_cfg = { { 0 } };
+
+	ipa_rsrc_mngr_db_cfg.def.rsrc_grp_sel = rsrc_group;
+
+	for (rsrc_type = 0; rsrc_type <= IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX;
+	     rsrc_type++) {
+		for (rsrc_id = 0; rsrc_id <= IPA_RSCR_MNGR_DB_RSRC_ID_MAX;
+		     rsrc_id++) {
+			ipa_rsrc_mngr_db_cfg.def.rsrc_id_sel = rsrc_id;
+			ipa_rsrc_mngr_db_cfg.def.rsrc_type_sel = rsrc_type;
+			IPA_MASKED_WRITE_SCALER_REG(IPA_RSRC_MNGR_DB_CFG,
+					     ipa_rsrc_mngr_db_cfg.value);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_rsrc_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_RSRC_READ);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_list_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_LIST_READ);
+		}
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_anomaly_check
+ *
+ * Checks RX state and TX state upon crash dump collection and prints
+ * anomalies.
+ *
+ * TBD- Add more anomaly checks in the future.
+ *
+ * @return
+ */
+static void ipa_reg_save_anomaly_check(void)
+{
+	if ((ipa_reg_save.ipa.gen.ipa_state.rx_wait != 0)
+	    || (ipa_reg_save.ipa.gen.ipa_state.rx_idle != 1)) {
+		IPADBG(
+		    "RX ACTIVITY, ipa_state.rx_wait = %d, ipa_state.rx_idle = %d, ipa_state_rx_active.endpoints = %d (bitmask)\n",
+		    ipa_reg_save.ipa.gen.ipa_state.rx_wait,
+		    ipa_reg_save.ipa.gen.ipa_state.rx_idle,
+		    ipa_reg_save.ipa.gen.ipa_state_rx_active.endpoints);
+
+		if (ipa_reg_save.ipa.gen.ipa_state.tx_idle != 1) {
+			IPADBG(
+			    "TX ACTIVITY, ipa_state.idle = %d, ipa_state_tx_wrapper.tx0_idle = %d, ipa_state_tx_wrapper.tx1_idle = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state.tx_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx0_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx1_idle);
+
+			IPADBG(
+			    "ipa_state_tx0.last_cmd_pipe = %d, ipa_state_tx1.last_cmd_pipe = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state_tx0.last_cmd_pipe,
+			    ipa_reg_save.ipa.gen.ipa_state_tx1.last_cmd_pipe);
+		}
+	}
+}

ipa/ipa_v3/dump/ipa_reg_dump.h (+1397, -0)

@@ -0,0 +1,1397 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_REG_DUMP_H_)
+#define _IPA_REG_DUMP_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include "../ipa_i.h"
+
+#include "ipa_pkt_cntxt.h"
+#include "ipa_hw_common_ex.h"
+
+#define IPA_0_IPA_WRAPPER_BASE 0 /* required by following includes */
+
+#include "ipa_hwio.h"
+#include "gsi_hwio.h"
+#include "ipa_gcc_hwio.h"
+
+#include "ipa_hwio_def.h"
+#include "gsi_hwio_def.h"
+#include "ipa_gcc_hwio_def.h"
+
+#define IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS     0x6
+#define IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS     0x4
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_EP            7
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_GRP           3
+#define IPA_TESTBUS_SEL_EP_MAX                   0x1F
+#define IPA_TESTBUS_SEL_EXTERNAL_MAX             0x40
+#define IPA_TESTBUS_SEL_INTERNAL_MAX             0xFF
+#define IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX        0x40
+#define IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS     0x9
+#define IPA_RSCR_MNGR_DB_RSRC_ID_MAX             0x3F
+#define IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX           0xA
+
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS   (0x0)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0   (0x1)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1   (0x2)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2   (0x3)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3   (0x4)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4   (0x5)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG  (0x9)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0   (0xB)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1   (0xC)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2   (0xD)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3   (0xE)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4   (0xF)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5   (0x10)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6   (0x11)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7   (0x12)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0   (0x13)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1   (0x14)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2   (0x15)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3   (0x16)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4   (0x17)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5   (0x18)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0    (0x1B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1    (0x1C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0    (0x1F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1    (0x20)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2    (0x21)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3    (0x22)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4    (0x23)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0  (0x27)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1  (0x28)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2  (0x29)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3  (0x2A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0   (0x2B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1   (0x2C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2   (0x2D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3   (0x2E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0 (0x33)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1 (0x34)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2 (0x35)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3 (0x36)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR     (0x3A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0  (0x3C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1  (0x3D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2    (0x1D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1   (0x3E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2   (0x3F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5   (0x40)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5    (0x41)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3   (0x42)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0   (0x43)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8   (0x44)
+
+#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
+#define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
+
+#define IPA_REG_SAVE_GSI_NUM_EE                  3
+
+#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
+
+#define IPA_GSI_OFFSET_WORDS_SCRATCH4            6
+#define IPA_GSI_OFFSET_WORDS_SCRATCH5            7
+
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK 0x7E000
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT    13
+
+#define IPA_REG_SAVE_HWP_GSI_EE                  2
+
+/*
+ * A structure used to map a source address to a destination address...
+ */
+struct map_src_dst_addr_s {
+	u32  src_addr; /* register offset to copy value from */
+	u32 *dst_addr; /* memory address to copy register value to */
+};
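+
+/*
+ * Illustrative sketch of how a table of these entries, such as the
+ * ipa_regs_to_save_array used by ipa_reg_dump.c, is consumed at dump
+ * time, assuming the in_dword() read accessor that pairs with the
+ * out_dword() write accessor used further below:
+ *
+ *   for (i = 0; i < ARRAY_SIZE(ipa_regs_to_save_array); i++)
+ *       *(ipa_regs_to_save_array[i].dst_addr) =
+ *           in_dword(ipa_regs_to_save_array[i].src_addr);
+ */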
+
+/*
+ * A macro to generate the names of scaler (i.e. non-vector) registers
+ * that reside in the *hwio.h files (said files contain the manifest
+ * constants for the registers' offsets in the register memory map).
+ */
+#define GEN_SCALER_REG_OFST(reg_name) \
+	(HWIO_ ## reg_name ## _ADDR)
+/*
+ * A macro designed to generate the rmsk associated with reg_name
+ */
+#define GEN_SCALER_REG_RMSK(reg_name) \
+	(HWIO_ ## reg_name ## _RMSK)
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via one dimension.
+ */
+#define GEN_1xVECTOR_REG_OFST(reg_name, row) \
+	(HWIO_ ## reg_name ## _ADDR(row))
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via two dimensions.
+ */
+#define GEN_2xVECTOR_REG_OFST(reg_name, row, col) \
+	(HWIO_ ## reg_name ## _ADDR(row, col))
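+
+/*
+ * Illustrative expansion, using a representative register name from
+ * gsi_hwio.h:
+ *
+ *   GEN_2xVECTOR_REG_OFST(EE_n_GSI_CH_k_CNTXT_0, 0, 3)
+ *
+ * becomes HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(0, 3), i.e. the offset of
+ * channel 3's CNTXT_0 register in EE 0.
+ */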
+
+/*
+ * A macro to generate the access to scaler registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a scaler
+ * register...
+ */
+#define IPA_READ_SCALER_REG(reg_name) \
+	HWIO_ ## reg_name ## _IN
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a one
+ * dimensional vector register...
+ */
+#define IPA_READ_1xVECTOR_REG(reg_name, row) \
+	HWIO_ ## reg_name ## _INI(row)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a two
+ * dimensional vector register...
+ */
+#define IPA_READ_2xVECTOR_REG(reg_name, row, col) \
+	HWIO_ ## reg_name ## _INI2(row, col)
+
+/*
+ * A macro to generate the access to scaler registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a scaler
+ * register...
+ */
+#define IPA_WRITE_SCALER_REG(reg_name, val) \
+	HWIO_ ## reg_name ## _OUT(val)
+
+/*
+ * Similar to the above, but with val masked by the register's rmsk...
+ */
+#define IPA_MASKED_WRITE_SCALER_REG(reg_name, val) \
+	out_dword(GEN_SCALER_REG_OFST(reg_name), \
+			  (GEN_SCALER_REG_RMSK(reg_name) & val))
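+
+/*
+ * Illustrative expansion, with a hypothetical register FOO whose
+ * HWIO_FOO_ADDR is 0x100 and HWIO_FOO_RMSK is 0xFF:
+ *
+ *   IPA_MASKED_WRITE_SCALER_REG(FOO, 0x1FF)
+ *
+ * becomes out_dword(0x100, 0xFF & 0x1FF), so only bits covered by
+ * the register's valid-field mask ever reach the hardware.
+ */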
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a one
+ * dimensional vector register...
+ */
+#define IPA_WRITE_1xVECTOR_REG(reg_name, row, val) \
+	HWIO_ ## reg_name ## _OUTI(row, val)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a two
+ * dimensional vector register...
+ */
+#define IPA_WRITE_2xVECTOR_REG(reg_name, row, col, val) \
+	HWIO_ ## reg_name ## _OUTI2(row, col, val)
+
+/*
+ * Macro that helps generate a mapping between a register's address
+ * and where the register's value will get stored (i.e. source and
+ * destination address mapping) upon dump...
+ */
+#define GEN_SRC_DST_ADDR_MAP(reg_name, sub_struct, field_name) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+	  (u32 *)&ipa_reg_save.sub_struct.field_name }
+
+/*
+ * Macro to get the value of bits 18:13, used to get rsrc cnts from
+ * IPA_DEBUG_DATA
+ */
+#define IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA(x) \
+	((x & IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK) >> \
+	 IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT)
+
+/*
+ * Macro to get rsrc cnt of specific rsrc type and rsrc grp from test
+ * bus collected data
+ */
+#define IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type, rsrc_grp) \
+	IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA( \
+		ipa_reg_save.ipa.testbus->ep_rsrc[rsrc_type].entry_ep \
+		[rsrc_grp].testbus_data.value)
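+
+/*
+ * Worked example with a hypothetical captured value: if the saved
+ * testbus_data.value is 0x0002A000, then
+ *
+ *   (0x0002A000 & 0x7E000) >> 13 = 0x15
+ *
+ * i.e. a count of 21 for the selected resource type in the selected
+ * resource group.
+ */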
+
+/*
+ * Macro to pluck the GSI version from RAM.
+ */
+#define IPA_REG_SAVE_GSI_VER(reg_name, var_name)	\
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.gen.var_name }
+/*
+ * Macro to define a particular register cfg entry for all 3 EE
+ * indexed register
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GEN_EE(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_HWP_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_HWP_EE].var_name })
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_FIFO(reg_name, var_name, index) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+		(u32 *)&ipa_reg_save.ipa.gsi_fifo_status[index].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for the extra
+ * pipe-indexed endpoint registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(reg_name, var_name) \
+	{ 0, 0 }
+
+/*
+ * Macro to define a particular register cfg entry for all source
+ * resource group registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[4].var_name })
+
+/*
+ * Macro to define a particular register cfg entry for all destination
+ * resource group registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[1].var_name })
+
+/*
+ * Macro to define a particular register cfg entry for all source
+ * resource group count registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[4].var_name })
+
+/*
+ * Macro to define a particular register cfg entry for all dest
+ * resource group count registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[1].var_name })
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_REG_SAVE_HWP_GSI_EE].\
+			var_name })
+
+/*
+ * Macro to define a particular register cfg entry for all GSI EE
+ * registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(reg_name, var_name) \
+	({ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[18].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 19), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[19].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name })
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
+	({ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name })
+
+/*
+ * Macro to define a particular register cfg entry for GSI QSB debug
+ * registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_QSB_DEBUG(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[0] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[1] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[2] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[3] })
+
+#define IPA_REG_SAVE_RX_SPLT_CMDQ(reg_name, var_name) \
+	({ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[0]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[1]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[2]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[3]})
+
+/*
+ * IPA HW Execution Environment (EE) Type
+ */
+enum ipa_hw_ee_e {
+	IPA_HW_A7_EE  = 0, /* A7's execution environment */
+	IPA_HW_Q6_EE  = 1, /* Q6's execution environment */
+	IPA_HW_HWP_EE = 3, /* HWP's execution environment */
+	IPA_HW_EE_MAX,     /* Max EE to support */
+};
+
+/*
+ * General IPA register save data struct (i.e. this is where register
+ * values, once read, get placed)
+ */
+struct ipa_gen_regs_s {
+	struct ipa_hwio_def_ipa_state_s
+	  ipa_state;
+	struct ipa_hwio_def_ipa_state_rx_active_s
+	  ipa_state_rx_active;
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s
+	  ipa_state_tx_wrapper;
+	struct ipa_hwio_def_ipa_state_tx0_s
+	  ipa_state_tx0;
+	struct ipa_hwio_def_ipa_state_tx1_s
+	  ipa_state_tx1;
+	struct ipa_hwio_def_ipa_state_aggr_active_s
+	  ipa_state_aggr_active;
+	struct ipa_hwio_def_ipa_state_dfetcher_s
+	  ipa_state_dfetcher;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s
+	  ipa_state_fetcher_mask_0;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s
+	  ipa_state_fetcher_mask_1;
+	struct ipa_hwio_def_ipa_state_gsi_aos_s
+	  ipa_state_gsi_aos;
+	struct ipa_hwio_def_ipa_state_gsi_if_s
+	  ipa_state_gsi_if;
+	struct ipa_hwio_def_ipa_state_gsi_skip_s
+	  ipa_state_gsi_skip;
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s
+	  ipa_state_gsi_tlv;
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s
+	  ipa_dpl_timer_lsb;
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s
+	  ipa_dpl_timer_msb;
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s
+	  ipa_proc_iph_cfg;
+	struct ipa_hwio_def_ipa_route_s
+	  ipa_route;
+	struct ipa_hwio_def_ipa_spare_reg_1_s
+	  ipa_spare_reg_1;
+	struct ipa_hwio_def_ipa_spare_reg_2_s
+	  ipa_spare_reg_2;
+	struct ipa_hwio_def_ipa_log_s
+	  ipa_log;
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s
+	  ipa_log_buf_status_cfg;
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s
+	  ipa_log_buf_status_addr;
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s
+	  ipa_log_buf_status_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s
+	  ipa_log_buf_status_ram_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s
+	  ipa_log_buf_hw_cmd_cfg;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s
+	  ipa_log_buf_hw_cmd_addr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s
+	  ipa_log_buf_hw_cmd_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s
+	  ipa_log_buf_hw_cmd_ram_ptr;
+	struct ipa_hwio_def_ipa_comp_hw_version_s
+	  ipa_comp_hw_version;
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s
+	  ipa_filt_rout_hash_en;
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s
+	  ipa_filt_rout_hash_flush;
+	struct ipa_hwio_def_ipa_state_fetcher_s
+	  ipa_state_fetcher;
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s
+	  ipa_ipv4_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s
+	  ipa_ipv6_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s
+	  ipa_ipv4_route_init_values;
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s
+	  ipa_ipv6_route_init_values;
+	struct ipa_hwio_def_ipa_bam_activated_ports_s
+	  ipa_bam_activated_ports;
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s
+	  ipa_tx_commander_cmdq_status;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s
+	  ipa_log_buf_hw_snif_el_en;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+	  ipa_log_buf_hw_snif_el_wr_n_rd_sel;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s
+	  ipa_log_buf_hw_snif_el_cli_mux;
+	struct ipa_hwio_def_ipa_state_acl_s
+	  ipa_state_acl;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s
+	  ipa_sys_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s
+	  ipa_sys_pkt_proc_cntxt_base_msb;
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s
+	  ipa_local_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s
+	  ipa_rsrc_grp_cfg;
+	struct ipa_hwio_def_ipa_comp_cfg_s
+	  ipa_comp_cfg;
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s
+	  ipa_state_dpl_fifo;
+	struct ipa_hwio_def_ipa_pipeline_disable_s
+	  ipa_pipeline_disable;
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s
+	  ipa_state_nlo_aggr;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s
+	  ipa_nlo_pp_cfg1;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s
+	  ipa_nlo_pp_cfg2;
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s
+	  ipa_nlo_pp_ack_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s
+	  ipa_nlo_pp_data_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s
+	  ipa_nlo_min_dsm_cfg;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s
+	  ipa_nlo_vp_flush_req;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s
+	  ipa_nlo_vp_flush_cookie;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s
+	  ipa_nlo_vp_flush_ack;
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s
+	  ipa_nlo_vp_dsm_open;
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s
+	  ipa_nlo_vp_qbap_open;
+};
+
+/*
+ * General IPA register save data struct
+ */
+struct ipa_reg_save_gen_ee_s {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s
+	  ipa_irq_stts_ee_n;
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s
+	  ipa_irq_en_ee_n;
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s
+	  ipa_fec_addr_ee_n;
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s
+	  ipa_fec_attr_ee_n;
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s
+	  ipa_snoc_fec_ee_n;
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s
+	  ipa_holb_drop_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s
+	  ipa_suspend_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s
+	  ipa_suspend_irq_en_ee_n;
+};
+
+/*
+ * Pipe Endp IPA register save data struct
+ */
+struct ipa_reg_save_pipe_endp_s {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s
+	  ipa_endp_init_ctrl_n;
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s
+	  ipa_endp_init_ctrl_scnd_n;
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s
+	  ipa_endp_init_cfg_n;
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s
+	  ipa_endp_init_nat_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s
+	  ipa_endp_init_hdr_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s
+	  ipa_endp_init_hdr_ext_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s
+	  ipa_endp_init_hdr_metadata_mask_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s
+	  ipa_endp_init_hdr_metadata_n;
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s
+	  ipa_endp_init_mode_n;
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s
+	  ipa_endp_init_aggr_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s
+	  ipa_endp_init_hol_block_en_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s
+	  ipa_endp_init_hol_block_timer_n;
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s
+	  ipa_endp_init_deaggr_n;
+	struct ipa_hwio_def_ipa_endp_status_n_s
+	  ipa_endp_status_n;
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s
+	  ipa_endp_init_rsrc_grp_n;
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s
+	  ipa_endp_init_seq_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s
+	  ipa_endp_gsi_cfg_tlv_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s
+	  ipa_endp_gsi_cfg_aos_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s
+	  ipa_endp_gsi_cfg1_n;
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s
+	  ipa_endp_filter_router_hsh_cfg_n;
+};
+
+/*
+ * Pipe IPA register save data struct
+ */
+struct ipa_reg_save_pipe_s {
+	u8				active;
+	struct ipa_reg_save_pipe_endp_s endp;
+};
+
+/*
+ * HWP IPA register save data struct
+ */
+struct ipa_reg_save_hwp_s {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s
+	  ipa_uc_qmb_sys_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s
+	  ipa_uc_qmb_local_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_length_s
+	  ipa_uc_qmb_length;
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s
+	  ipa_uc_qmb_trigger;
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s
+	  ipa_uc_qmb_pending_tid;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s
+	  ipa_uc_qmb_completed_rd_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s
+	  ipa_uc_qmb_completed_wr_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s
+	  ipa_uc_qmb_misc;
+	struct ipa_hwio_def_ipa_uc_qmb_status_s
+	  ipa_uc_qmb_status;
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s
+	  ipa_uc_qmb_bus_attrib;
+};
+
+/*
+ * IPA TESTBUS entry struct
+ */
+struct ipa_reg_save_ipa_testbus_entry_s {
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel;
+	union ipa_hwio_def_ipa_debug_data_u testbus_data;
+};
+
+/* IPA TESTBUS global struct */
+struct ipa_reg_save_ipa_testbus_global_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	global[IPA_TESTBUS_SEL_INTERNAL_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP struct */
+struct ipa_reg_save_ipa_testbus_ep_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	entry_ep[IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP resource group struct */
+struct ipa_reg_save_ipa_testbus_ep_rsrc_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	  entry_ep[IPA_DEBUG_TESTBUS_RSRC_NUM_GRP];
+};
+
+/* IPA TESTBUS save data struct */
+struct ipa_reg_save_ipa_testbus_s {
+	struct ipa_reg_save_ipa_testbus_global_s global;
+	struct ipa_reg_save_ipa_testbus_ep_s
+	  ep[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_reg_save_ipa_testbus_ep_rsrc_s
+	  ep_rsrc[IPA_DEBUG_TESTBUS_RSRC_NUM_EP];
+};
+
+/*
+ * Debug IPA register save data struct
+ */
+struct ipa_reg_save_dbg_s {
+	struct ipa_hwio_def_ipa_debug_data_s
+	  ipa_debug_data;
+	struct ipa_hwio_def_ipa_step_mode_status_s
+	  ipa_step_mode_status;
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s
+	  ipa_step_mode_breakpoints;
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s
+	  ipa_rx_splt_cmdq_cmd_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s
+	  ipa_rx_splt_cmdq_cfg_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s
+	  ipa_rx_splt_cmdq_data_wr_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s
+	  ipa_rx_splt_cmdq_data_wr_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s
+	  ipa_rx_splt_cmdq_data_wr_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s
+	  ipa_rx_splt_cmdq_data_wr_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s
+	  ipa_rx_splt_cmdq_data_rd_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s
+	  ipa_rx_splt_cmdq_data_rd_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s
+	  ipa_rx_splt_cmdq_data_rd_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s
+	  ipa_rx_splt_cmdq_data_rd_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s
+	  ipa_rx_splt_cmdq_status_n[IPA_RX_SPLT_CMDQ_MAX];
+
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u
+		ipa_rx_hps_cmdq_cfg_wr;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u
+		ipa_rx_hps_cmdq_cfg_rd;
+
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s
+	  ipa_rx_hps_cmdq_cmd;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u
+		ipa_rx_hps_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u
+		ipa_rx_hps_cmdq_data_rd_1_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u
+		ipa_rx_hps_cmdq_data_rd_2_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u
+		ipa_rx_hps_cmdq_data_rd_3_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_count_u
+	  ipa_rx_hps_cmdq_count_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_status_u
+	  ipa_rx_hps_cmdq_status_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s
+	  ipa_rx_hps_cmdq_status_empty;
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s
+	  ipa_rx_hps_clients_min_depth_0;
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s
+	  ipa_rx_hps_clients_max_depth_0;
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s
+	  ipa_hps_dps_cmdq_cmd;
+	union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u
+		ipa_hps_dps_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_count_u
+		ipa_hps_dps_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_status_u
+		ipa_hps_dps_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s
+	  ipa_hps_dps_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s
+	  ipa_dps_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u
+		ipa_dps_tx_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_count_u
+		ipa_dps_tx_cmdq_count_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_status_u
+		ipa_dps_tx_cmdq_status_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s
+	  ipa_dps_tx_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s
+	  ipa_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u
+		ipa_ackmngr_cmdq_data_rd_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_count_u
+	  ipa_ackmngr_cmdq_count_arr[IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_status_u
+		ipa_ackmngr_cmdq_status_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s
+	  ipa_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s
+	  ipa_prod_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u
+		ipa_prod_ackmngr_cmdq_data_rd_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u
+		ipa_prod_ackmngr_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u
+		ipa_prod_ackmngr_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+	  ipa_prod_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s
+	  ipa_ntf_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u
+		ipa_ntf_tx_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u
+		ipa_ntf_tx_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u
+		ipa_ntf_tx_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s
+	  ipa_ntf_tx_cmdq_status_empty;
+
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u
+		ipa_rsrc_mngr_db_rsrc_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+	union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u
+		ipa_rsrc_mngr_db_list_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+};
+
+/* Source Resource Group IPA register save data struct */
+struct ipa_reg_save_src_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s
+	  ipa_src_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s
+	  ipa_src_rsrc_grp_23_rsrc_type_n;
+	struct ipa_hwio_def_ipa_src_rsrc_grp_45_rsrc_type_n_s
+	  ipa_src_rsrc_grp_45_rsrc_type_n;
+};
+
+/* Destination Resource Group IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_23_rsrc_type_n;
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_45_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_45_rsrc_type_n;
+};
+
+/* Source Resource Group Count IPA register save data struct */
+struct ipa_reg_save_src_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_src_rsrc_grp_0123_rsrc_type_cnt_n;
+	struct ipa_hwio_def_ipa_src_rsrc_grp_4567_rsrc_type_cnt_n_s
+	  ipa_src_rsrc_grp_4567_rsrc_type_cnt_n;
+};
+
+/* Destination Resource Group Count IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n;
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n_s
+	  ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n;
+};
+
+/* GSI General register save data struct */
+struct ipa_reg_save_gsi_gen_s {
+	struct gsi_hwio_def_gsi_cfg_s
+	  gsi_cfg;
+	struct gsi_hwio_def_gsi_ree_cfg_s
+	  gsi_ree_cfg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+	  ipa_gsi_top_gsi_inst_ram_n;
+};
+
+/* GSI General EE register save data struct */
+struct ipa_reg_save_gsi_gen_ee_s {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s
+	  gsi_manager_ee_qos_n;
+	struct gsi_hwio_def_ee_n_gsi_status_s
+	  ee_n_gsi_status;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s
+	  ee_n_cntxt_type_irq;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s
+	  ee_n_cntxt_type_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s
+	  ee_n_cntxt_src_gsi_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s
+	  ee_n_cntxt_src_ev_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s
+	  ee_n_cntxt_src_gsi_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s
+	  ee_n_cntxt_src_ev_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s
+	  ee_n_cntxt_src_ieob_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s
+	  ee_n_cntxt_src_ieob_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s
+	  ee_n_cntxt_gsi_irq_stts;
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s
+	  ee_n_cntxt_glob_irq_stts;
+	struct gsi_hwio_def_ee_n_error_log_s
+	  ee_n_error_log;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s
+	  ee_n_cntxt_scratch_0;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s
+	  ee_n_cntxt_scratch_1;
+	struct gsi_hwio_def_ee_n_cntxt_intset_s
+	  ee_n_cntxt_intset;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s
+	  ee_n_cntxt_msi_base_lsb;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s
+	  ee_n_cntxt_msi_base_msb;
+};
+
+/* GSI QSB debug register save data struct */
+struct ipa_reg_save_gsi_qsb_debug_s {
+	struct gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_s
+	  qsb_log_last_misc[GSI_HW_QSB_LOG_MISC_MAX];
+};
+
+static u32 ipa_reg_save_gsi_ch_test_bus_selector_array[] = {
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8,
+};
+
+/*
+ * GSI test bus register save data struct
+ */
+struct ipa_reg_save_gsi_test_bus_s {
+	u32 test_bus_selector[
+		ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array)];
+	struct
+	  gsi_hwio_def_gsi_test_bus_reg_s
+	  test_bus_reg[ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array)];
+};
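+
+/*
+ * A note on intended use (inferred from the selector array above; the
+ * collection loop itself lives in ipa_reg_dump.c): for each index i,
+ * test_bus_selector[i] records the HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_*
+ * value programmed into the GSI test bus, and test_bus_reg[i] holds
+ * the data captured under that selection.
+ */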
+
+/* GSI debug MCS registers save data struct */
+struct ipa_reg_save_gsi_mcs_regs_s {
+	struct
+	  gsi_hwio_def_gsi_debug_sw_rf_n_read_s
+		mcs_reg[HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1];
+};
+
+/* GSI debug counters save data struct */
+struct ipa_reg_save_gsi_debug_cnt_s {
+	struct
+	  gsi_hwio_def_gsi_debug_countern_s
+		cnt[HWIO_GSI_DEBUG_COUNTERn_MAXn + 1];
+};
+
+/* GSI IRAM pointers (IEP) save data struct */
+struct ipa_reg_save_gsi_iram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ev_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s
+	  ipa_gsi_top_gsi_iram_ptr_new_re;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_dis_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_empty;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_event_gen_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+	  ipa_gsi_top_gsi_iram_ptr_timer_expired;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_write_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_read_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+	  ipa_gsi_top_gsi_iram_ptr_uc_gp_int;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+	  ipa_gsi_top_gsi_iram_ptr_int_mod_stopped;
+};
+
+/* GSI SHRAM pointers save data struct */
+struct ipa_reg_save_gsi_shram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr;
+};
+
+/* GSI debug register save data struct */
+struct ipa_reg_save_gsi_debug_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s
+	  ipa_gsi_top_gsi_debug_busy_reg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+	  ipa_gsi_top_gsi_debug_event_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+	  ipa_gsi_top_gsi_debug_timer_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+	  ipa_gsi_top_gsi_debug_rd_wr_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s
+	  ipa_gsi_top_gsi_debug_pc_from_sw;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s
+	  ipa_gsi_top_gsi_debug_sw_stall;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+	  ipa_gsi_top_gsi_debug_pc_for_debug;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+	  ipa_gsi_top_gsi_debug_qsb_log_err_trns_id;
+	struct ipa_reg_save_gsi_qsb_debug_s	gsi_qsb_debug;
+	struct ipa_reg_save_gsi_test_bus_s		gsi_test_bus;
+	struct ipa_reg_save_gsi_mcs_regs_s		gsi_mcs_regs;
+	struct ipa_reg_save_gsi_debug_cnt_s		gsi_cnt_regs;
+	struct ipa_reg_save_gsi_iram_ptr_regs_s		gsi_iram_ptrs;
+	struct ipa_reg_save_gsi_shram_ptr_regs_s	gsi_shram_ptrs;
+};
+
+/* GSI MCS channel scratch registers save data struct */
+struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s {
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch4;
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch5;
+};
+
+/* GSI Channel Context register save data struct */
+struct ipa_reg_save_gsi_ch_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s
+	  ee_n_gsi_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s
+	  ee_n_gsi_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s
+	  ee_n_gsi_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s
+	  ee_n_gsi_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s
+	  ee_n_gsi_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s
+	  ee_n_gsi_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s
+	  ee_n_gsi_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s
+	  ee_n_gsi_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_read_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_write_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s
+	  ee_n_gsi_ch_k_qos;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s
+	  ee_n_gsi_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s
+	  ee_n_gsi_ch_k_scratch_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s
+	  ee_n_gsi_ch_k_scratch_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s
+	  ee_n_gsi_ch_k_scratch_3;
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s
+	  gsi_map_ee_n_ch_k_vp_table;
+	struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s mcs_channel_scratch;
+};
+
+/* GSI Event Context register save data struct */
+struct ipa_reg_save_gsi_evt_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s
+	  ee_n_ev_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s
+	  ee_n_ev_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s
+	  ee_n_ev_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s
+	  ee_n_ev_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s
+	  ee_n_ev_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s
+	  ee_n_ev_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s
+	  ee_n_ev_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s
+	  ee_n_ev_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s
+	  ee_n_ev_ch_k_cntxt_8;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s
+	  ee_n_ev_ch_k_cntxt_9;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s
+	  ee_n_ev_ch_k_cntxt_10;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s
+	  ee_n_ev_ch_k_cntxt_11;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s
+	  ee_n_ev_ch_k_cntxt_12;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s
+	  ee_n_ev_ch_k_cntxt_13;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s
+	  ee_n_ev_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s
+	  ee_n_ev_ch_k_scratch_1;
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s
+	  gsi_debug_ee_n_ev_k_vp_table;
+};
+
+/* GSI FIFO status register save data struct */
+struct ipa_reg_save_gsi_fifo_status_s {
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u
+		gsi_fifo_status_ctrl;
+	union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u
+		gsi_tlv_fifo_status;
+	union ipa_hwio_def_ipa_gsi_aos_fifo_status_u
+		gsi_aos_fifo_status;
+};
+
+/* GSI Channel Context register save top level data struct */
+struct ipa_reg_save_gsi_ch_cntxt_s {
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
+};
+
+/* GSI Event Context register save top level data struct */
+struct ipa_reg_save_gsi_evt_cntxt_s {
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
+};
+
+/* Top level IPA register save data struct */
+struct ipa_regs_save_hierarchy_s {
+	struct ipa_gen_regs_s
+		gen;
+	struct ipa_reg_save_gen_ee_s
+		gen_ee[IPA_HW_EE_MAX];
+	struct ipa_reg_save_hwp_s
+		hwp;
+	struct ipa_reg_save_dbg_s
+		dbg;
+	struct ipa_reg_save_ipa_testbus_s
+		*testbus;
+	struct ipa_reg_save_pipe_s
+		pipes[IPA_HW_PIPE_ID_MAX];
+	struct ipa_reg_save_src_rsrc_grp_s
+		src_rsrc_grp[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_grp_s
+		dst_rsrc_grp[IPA_HW_DST_RSRP_TYPE_MAX];
+	struct ipa_reg_save_src_rsrc_cnt_s
+		src_rsrc_cnt[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_cnt_s
+		dst_rsrc_cnt[IPA_HW_DST_RSRP_TYPE_MAX];
+	u32 *ipa_iu_ptr;
+	u32 *ipa_sram_ptr;
+	u32 *ipa_mbox_ptr;
+	u32 *ipa_hram_ptr;
+	u32 *ipa_seq_ptr;
+	u32 *ipa_gsi_ptr;
+};
+
+/* Top level GSI register save data struct */
+struct gsi_regs_save_hierarchy_s {
+	u32 fw_ver;
+	struct ipa_reg_save_gsi_gen_s		gen;
+	struct ipa_reg_save_gsi_gen_ee_s	gen_ee[IPA_REG_SAVE_GSI_NUM_EE];
+	struct ipa_reg_save_gsi_ch_cntxt_s	ch_cntxt;
+	struct ipa_reg_save_gsi_evt_cntxt_s	evt_cntxt;
+	struct ipa_reg_save_gsi_debug_s		debug;
+};
+
+/* Source resources for a resource group */
+struct ipa_reg_save_src_rsrc_cnts_s {
+	u8 pkt_cntxt;
+	u8 descriptor_list;
+	u8 data_descriptor_buffer;
+	u8 hps_dmars;
+	u8 reserved_acks;
+};
+
+/* Destination resources for a resource group */
+struct ipa_reg_save_dst_rsrc_cnts_s {
+	u8 reserved_sectors;
+	u8 dps_dmars;
+};
+
+/* Resource count structure for a resource group */
+struct ipa_reg_save_rsrc_cnts_per_grp_s {
+	/* Resource group number */
+	u8 resource_group;
+	/* Source resources for a resource group */
+	struct ipa_reg_save_src_rsrc_cnts_s src;
+	/* Destination resources for a resource group */
+	struct ipa_reg_save_dst_rsrc_cnts_s dst;
+};
+
+/* Top level resource count structure */
+struct ipa_reg_save_rsrc_cnts_s {
+	/* Resource count structure for PCIE group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s pcie;
+	/* Resource count structure for DDR group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s ddr;
+};
+
+/*
+ * Top level IPA and GSI registers save data struct
+ */
+struct regs_save_hierarchy_s {
+	struct ipa_regs_save_hierarchy_s
+		ipa;
+	struct gsi_regs_save_hierarchy_s
+		gsi;
+	bool
+		pkt_ctntx_active[IPA_HW_PKT_CTNTX_MAX];
+	union ipa_hwio_def_ipa_ctxh_ctrl_u
+		pkt_ctntxt_lock;
+	enum ipa_hw_pkt_cntxt_state_e
+		pkt_cntxt_state[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_pkt_ctntx_s
+		pkt_ctntx[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_reg_save_rsrc_cnts_s
+		rsrc_cnts;
+	struct ipa_reg_save_gsi_fifo_status_s
+		gsi_fifo_status[IPA_HW_PIPE_ID_MAX];
+};
+
+/*
+ * The following section deals with handling IPA registers' memory
+ * access relative to pre-defined memory protection schemes
+ * (ie. "access control").
+ *
+ * In a nutshell, the intent of the data structures below is to allow
+ * higher-level register accessors to be unaware of what is really
+ * going on at the lowest level (i.e. real vs. no-op access).  This
+ * methodology is also designed to allow for platform-specific "access
+ * maps."
+ */
+
+/*
+ * Function for doing an actual read
+ */
+static inline u32
+act_read(void __iomem *addr)
+{
+	u32 val = ioread32(addr);
+
+	return val;
+}
+
+/*
+ * Function for doing an actual write
+ */
+static inline void
+act_write(void __iomem *addr, u32 val)
+{
+	iowrite32(val, addr);
+}
+
+/*
+ * Function that pretends to do a read
+ */
+static inline u32
+nop_read(void __iomem *addr)
+{
+	return IPA_MEM_INIT_VAL;
+}
+
+/*
+ * Function that pretends to do a write
+ */
+static inline void
+nop_write(void __iomem *addr, u32 val)
+{
+}
+
+/*
+ * The following are used to define struct reg_access_funcs_s below...
+ */
+typedef u32 (*reg_read_func_t)(
+	void __iomem *addr);
+typedef void (*reg_write_func_t)(
+	void __iomem *addr,
+	u32 val);
+
+/*
+ * The following is used to define io_matrix[] below...
+ */
+struct reg_access_funcs_s {
+	reg_read_func_t  read;
+	reg_write_func_t write;
+};
+
+/*
+ * The following will be used to appropriately index into the
+ * read/write combos defined in io_matrix[] below...
+ */
+#define AA_COMBO 0 /* actual read, actual write */
+#define AN_COMBO 1 /* actual read, no-op write  */
+#define NA_COMBO 2 /* no-op read,  actual write */
+#define NN_COMBO 3 /* no-op read,  no-op write  */
+
+/*
+ * The following will be used to dictate registers' access methods
+ * relative to the state of secure debug...whether it's enabled or
+ * disabled.
+ *
+ * NOTE: The table below defines all access combinations.
+ */
+static struct reg_access_funcs_s io_matrix[] = {
+	{ act_read, act_write }, /* the AA_COMBO */
+	{ act_read, nop_write }, /* the AN_COMBO */
+	{ nop_read, act_write }, /* the NA_COMBO */
+	{ nop_read, nop_write }, /* the NN_COMBO */
+};
+
+/*
+ * The following will be used to define and drive IPA's register
+ * access rules.
+ */
+struct reg_mem_access_map_t {
+	u32 addr_range_begin;
+	u32 addr_range_end;
+	struct reg_access_funcs_s *access[2];
+};
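+
+/*
+ * Illustrative sketch only (not part of this snapshot; "example_map",
+ * "example_read" and "sd_enabled" are assumed names): a low-level
+ * accessor could select a read/write pair from io_matrix[] by
+ * secure-debug state and stay unaware of real vs. no-op access.
+ */
+#if 0
+static struct reg_mem_access_map_t example_map[] = {
+	/* begin       end        sd enabled             sd disabled */
+	{ 0x00000000, 0x0000FFFF, { &io_matrix[AA_COMBO],
+				    &io_matrix[AN_COMBO] } },
+};
+
+static u32 example_read(struct reg_mem_access_map_t *map,
+	void __iomem *addr, bool sd_enabled)
+{
+	return map->access[sd_enabled ? 0 : 1]->read(addr);
+}
+#endif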
+
+#endif /* #if !defined(_IPA_REG_DUMP_H_) */

ipa/ipa_v3/ipa.c

@@ -0,0 +1,8884 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/interconnect.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/msm_gsi.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
+#include <linux/pci.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/qcom_scm.h>
+#include <asm/cacheflush.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/of_irq.h>
+#include <linux/ctype.h>
+
+#ifdef CONFIG_ARM64
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_flush_range(x, y)
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#endif
+
+#define IPA_SUBSYSTEM_NAME "ipa_fws"
+#define IPA_UC_SUBSYSTEM_NAME "ipa_uc"
+
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+#include "ipa_odl.h"
+
+#define IPA_SUSPEND_BUSY_TIMEOUT (msecs_to_jiffies(10))
+
+/*
+ * The following adds code (i.e. for EMULATION) not found on x86.
+ */
+#if defined(CONFIG_IPA_EMULATION)
+# include "ipa_emulation_stubs.h"
+#endif
+
+#ifdef CONFIG_COMPAT
+/**
+ * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa3_ioc_nat_alloc_mem32 {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	compat_size_t size;
+	compat_off_t offset;
+};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc32 {
+	compat_size_t size;
+	compat_off_t offset;
+};
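+
+/*
+ * Illustrative sketch only (not part of this snapshot;
+ * "example_compat_nat_alloc" is an assumed name): a compat ioctl path
+ * would copy the 32-bit layout in and widen the fields before reusing
+ * the native struct ipa_ioc_nat_alloc_mem handler.
+ */
+#if 0
+static int example_compat_nat_alloc(unsigned long arg,
+	struct ipa_ioc_nat_alloc_mem *out)
+{
+	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
+
+	if (copy_from_user(&nat_mem32, compat_ptr(arg), sizeof(nat_mem32)))
+		return -EFAULT;
+	memcpy(out->dev_name, nat_mem32.dev_name, IPA_RESOURCE_NAME_MAX);
+	out->size = (size_t)nat_mem32.size;
+	out->offset = (off_t)nat_mem32.offset;
+	return 0;
+}
+#endif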
+#endif /* #ifdef CONFIG_COMPAT */
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+
+struct tz_smmu_ipa_protect_region_iovec_s {
+	u64 input_addr;
+	u64 output_addr;
+	u64 size;
+	u32 attr;
+} __packed;
+
+struct tz_smmu_ipa_protect_region_s {
+	phys_addr_t iovec_buf;
+	u32 size_bytes;
+} __packed;
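+
+/*
+ * Both structs above are handed to TrustZone: they are __packed so the
+ * compiler cannot insert padding, and the iovec list is referenced by
+ * physical address (iovec_buf) rather than by a kernel virtual pointer.
+ */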
+
+static void ipa3_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
+
+static void ipa3_transport_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
+	ipa3_transport_release_resource);
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+
+static int ipa3_attach_to_smmu(void);
+static int ipa3_alloc_pkt_init(void);
+
+static void ipa3_load_ipa_fw(struct work_struct *work);
+static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
+
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_dec_clients_disable_clks_on_wq_work,
+	ipa_dec_clients_disable_clks_on_wq);
+
+static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg);
+static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg);
+static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
+static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_query(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg);
+
+static struct ipa3_plat_drv_res ipa3_res = {0, };
+
+static struct clk *ipa3_clk;
+
+struct ipa3_context *ipa3_ctx;
+
+static struct {
+	bool present[IPA_SMMU_CB_MAX];
+	bool arm_smmu;
+	bool use_64_bit_dma_mask;
+	u32 ipa_base;
+	u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa3_active_clients_log_print_buffer(char *buf, int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	int start_idx;
+	int end_idx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
+	for (i = start_idx; i != end_idx;
+		i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+				ipa3_ctx->ipa3_active_clients_logging
+				.log_buffer[i]);
+		cnt += nbytes;
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
+
+	return cnt;
+}
+
+int ipa3_active_clients_log_print_table(char *buf, int size)
+{
+	int i;
+	struct ipa3_active_client_htable_entry *iterator;
+	int cnt = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
+			iterator, list) {
+		switch (iterator->type) {
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d ENDPOINT\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SIMPLE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d RESOURCE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SPECIAL\n",
+					iterator->id_string, iterator->count);
+			break;
+		default:
+			IPAERR("Trying to print illegal active_clients type");
+			break;
+		}
+	}
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"\nTotal active clients count: %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+
+	if (ipa3_is_mhip_offload_enabled())
+		cnt += ipa_mpm_panic_handler(buf + cnt, size - cnt);
+
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
+
+	return cnt;
+}
+
+static int ipa3_clean_modem_rule(void)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *req;
+	struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex;
+	int val = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) {
+		req = kzalloc(
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+			GFP_KERNEL);
+		if (!req) {
+			IPAERR("mem allocated failed!\n");
+			return -ENOMEM;
+		}
+		req->filter_spec_list_valid = false;
+		req->filter_spec_list_len = 0;
+		req->source_pipe_index_valid = 0;
+		val = ipa3_qmi_filter_request_send(req);
+		kfree(req);
+	} else {
+		req_ex = kzalloc(
+			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01),
+			GFP_KERNEL);
+		if (!req_ex) {
+			IPAERR("mem allocated failed!\n");
+			return -ENOMEM;
+		}
+		req_ex->filter_spec_ex_list_valid = false;
+		req_ex->filter_spec_ex_list_len = 0;
+		req_ex->source_pipe_index_valid = 0;
+		val = ipa3_qmi_filter_request_ex_send(req_ex);
+		kfree(req_ex);
+	}
+
+	return val;
+}
+
+static int ipa3_clean_mhip_dl_rule(void)
+{
+	struct ipa_remove_offload_connection_req_msg_v01 req;
+
+	memset(&req, 0, sizeof(struct
+		ipa_remove_offload_connection_req_msg_v01));
+
+	req.clean_all_rules_valid = true;
+	req.clean_all_rules = true;
+
+	if (ipa3_qmi_rmv_offload_request_send(&req)) {
+		IPAWANDBG("clean dl rule cache failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	ipa3_active_clients_log_print_table(active_clients_table_buf,
+			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+	IPAERR("%s\n", active_clients_table_buf);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_active_clients_panic_blk = {
+	.notifier_call  = ipa3_active_clients_panic_notifier,
+};
+
+#ifdef CONFIG_IPA_DEBUG
+static int ipa3_active_clients_log_insert(const char *string)
+{
+	int head;
+	int tail;
+
+	if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
+		return -EPERM;
+
+	head = ipa3_ctx->ipa3_active_clients_logging.log_head;
+	tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
+
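+	/*
+	 * Ring semantics: head is the next line to fill; once head
+	 * catches up with tail, the oldest line is dropped by advancing
+	 * tail, so the buffer always keeps the most recent entries.
+	 */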
+	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
+			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
+			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	if (tail == head)
+		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
+	ipa3_ctx->ipa3_active_clients_logging.log_head = head;
+
+	return 0;
+}
+#endif
+
+static int ipa3_active_clients_log_init(void)
+{
+	int i;
+
+	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
+	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kcalloc(
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES,
+			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+			GFP_KERNEL);
+	active_clients_table_buf = kzalloc(sizeof(
+			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
+		pr_err("Active Clients Logging memory allocation failed\n");
+		goto bail;
+	}
+	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
+			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
+			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+	}
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&ipa3_active_clients_panic_blk);
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = true;
+
+	return 0;
+
+bail:
+	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+	kfree(active_clients_table_buf);
+	active_clients_table_buf = NULL;
+	return -ENOMEM;
+}
+
+void ipa3_active_clients_log_clear(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
+}
+
+static void ipa3_active_clients_log_destroy(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+	kfree(active_clients_table_buf);
+	active_clients_table_buf = NULL;
+	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
+}
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
+{
+	if (VALID_IPA_SMMU_CB_TYPE(cb_type) && smmu_cb[cb_type].valid)
+		return smmu_cb[cb_type].iommu_domain;
+
+	IPAERR("cb_type(%d) not valid\n", cb_type);
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_smmu_domain(void)
+{
+	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_AP);
+}
+
+struct iommu_domain *ipa3_get_uc_smmu_domain(void)
+{
+	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_UC);
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN);
+}
+
+struct iommu_domain *ipa3_get_11ad_smmu_domain(void)
+{
+	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_11AD);
+}
+
+struct device *ipa3_get_dma_dev(void)
+{
+	return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_get_smmu_ctx()- Return smmu context for the given cb_type
+ * @cb_type: the type of the IPA SMMU context bank
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type)
+{
+	return &smmu_cb[cb_type];
+}
+
+static int ipa3_open(struct inode *inode, struct file *filp)
+{
+	IPADBG_LOW("ENTER\n");
+	filp->private_data = ipa3_ctx;
+
+	return 0;
+}
+
+static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != WAN_UPSTREAM_ROUTE_ADD &&
+	    type != WAN_UPSTREAM_ROUTE_DEL &&
+	    type != WAN_EMBMS_CONNECT) {
+		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa3_send_wan_msg(unsigned long usr_param,
+	uint8_t msg_type, bool is_cache)
+{
+	int retval;
+	struct ipa_wan_msg *wan_msg;
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg cache_wan_msg;
+
+	wan_msg = kzalloc(sizeof(*wan_msg), GFP_KERNEL);
+	if (!wan_msg)
+		return -ENOMEM;
+
+	if (copy_from_user(wan_msg, (const void __user *)usr_param,
+		sizeof(struct ipa_wan_msg))) {
+		kfree(wan_msg);
+		return -EFAULT;
+	}
+
+	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
+	if (retval) {
+		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
+		kfree(wan_msg);
+		return retval;
+	}
+
+	if (is_cache) {
+		mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);
+
+		/* cache the cne event */
+		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
+			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
+			&cache_wan_msg,
+			sizeof(cache_wan_msg));
+
+		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
+			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
+			&msg_meta,
+			sizeof(struct ipa_msg_meta));
+
+		ipa3_ctx->num_ipa_cne_evt_req++;
+		ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
+		mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
+	}
+
+	return 0;
+}
+
+static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	switch (type) {
+	case ADD_VLAN_IFACE:
+	case DEL_VLAN_IFACE:
+	case ADD_L2TP_VLAN_MAPPING:
+	case DEL_L2TP_VLAN_MAPPING:
+	case ADD_BRIDGE_VLAN_MAPPING:
+	case DEL_BRIDGE_VLAN_MAPPING:
+		break;
+	default:
+		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_ioc_vlan_iface_info *vlan_info;
+	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+	struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
+	struct ipa_msg_meta msg_meta;
+	void *buff;
+
+	IPADBG("type %d\n", msg_type);
+
+	memset(&msg_meta, 0, sizeof(msg_meta));
+	msg_meta.msg_type = msg_type;
+
+	if ((msg_type == ADD_VLAN_IFACE) ||
+		(msg_type == DEL_VLAN_IFACE)) {
+		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
+			GFP_KERNEL);
+		if (!vlan_info)
+			return -ENOMEM;
+
+		if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
+			sizeof(struct ipa_ioc_vlan_iface_info))) {
+			kfree(vlan_info);
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
+		buff = vlan_info;
+	} else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
+		(msg_type == DEL_L2TP_VLAN_MAPPING)) {
+		mapping_info = kzalloc(sizeof(*mapping_info), GFP_KERNEL);
+		if (!mapping_info)
+			return -ENOMEM;
+
+		if (copy_from_user((u8 *)mapping_info,
+			(void __user *)usr_param,
+			sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
+			kfree(mapping_info);
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(*mapping_info);
+		buff = mapping_info;
+	} else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
+		(msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
+		bridge_vlan_info = kzalloc(
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
+			GFP_KERNEL);
+		if (!bridge_vlan_info)
+			return -ENOMEM;
+
+		if (copy_from_user((u8 *)bridge_vlan_info,
+			(void __user *)usr_param,
+			sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
+			kfree(bridge_vlan_info);
+			IPAERR("copy from user failed\n");
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(*bridge_vlan_info);
+		buff = bridge_vlan_info;
+	} else {
+		IPAERR("Unexpected event\n");
+		return -EFAULT;
+	}
+
+	retval = ipa3_send_msg(&msg_meta, buff,
+		ipa3_vlan_l2tp_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+			retval,
+			msg_type);
+		kfree(buff);
+		return retval;
+	}
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	switch (type) {
+	case IPA_GSB_CONNECT:
+	case IPA_GSB_DISCONNECT:
+		break;
+	default:
+		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_ioc_gsb_info *gsb_info;
+	struct ipa_msg_meta msg_meta;
+	void *buff;
+
+	IPADBG("type %d\n", msg_type);
+
+	memset(&msg_meta, 0, sizeof(msg_meta));
+	msg_meta.msg_type = msg_type;
+
+	if ((msg_type == IPA_GSB_CONNECT) ||
+		(msg_type == IPA_GSB_DISCONNECT)) {
+		gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info),
+			GFP_KERNEL);
+		if (!gsb_info) {
+			IPAERR("no memory\n");
+			return -ENOMEM;
+		}
+
+		if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param,
+			sizeof(struct ipa_ioc_gsb_info))) {
+			kfree(gsb_info);
+			return -EFAULT;
+		}
+
+		msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info);
+		buff = gsb_info;
+	} else {
+		IPAERR("Unexpected event\n");
+		return -EFAULT;
+	}
+
+	retval = ipa3_send_msg(&msg_meta, buff,
+		ipa3_gsb_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+			retval,
+			msg_type);
+		kfree(buff);
+		return retval;
+	}
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_rt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_v2 *)header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size >
+		sizeof(struct ipa_rt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rule_add_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_i) * pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size,
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_usr_v2(
+		(struct ipa_ioc_add_rt_rule_v2 *)header, true)) {
+		IPAERR_RL("ipa3_add_rt_rule_usr_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size,
+			kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			((struct ipa_ioc_add_rt_rule_v2 *)
+			header)->rule_add_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
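+
+/*
+ * Note on the *_v2 ioctl pattern above: the v2 uAPI carries the
+ * per-record size (rule_add_size here) so binaries built against an
+ * older, smaller struct keep working. Each user-sized record is copied
+ * into a zeroed kernel-sized slot (new fields read as zero), the
+ * operation runs on the kernel layout, and only the user-sized prefix
+ * is copied back. The same scheme repeats in all *_v2 handlers below.
+ */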
+
+static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header,
+			(const void __user *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule_ext_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size >
+		sizeof(struct ipa_rt_rule_add_ext_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rule_add_ext_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_ext_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_ext_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i *
+			sizeof(struct ipa_rt_rule_add_ext_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size,
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_ext_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_ext_v2(
+		(struct ipa_ioc_add_rt_rule_ext_v2 *)header)) {
+		IPAERR_RL("ipa3_add_rt_rule_ext_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size,
+			kptr + i *
+			sizeof(struct ipa_rt_rule_add_ext_i),
+			((struct ipa_ioc_add_rt_rule_ext_v2 *)
+			header)->rule_add_ext_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_rt_rule_after_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size >
+		sizeof(struct ipa_rt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rule_add_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_rt_rule_after_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size,
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_rt_rule_after_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_rt_rule_after_v2(
+		(struct ipa_ioc_add_rt_rule_after_v2 *)header)) {
+		IPAERR_RL("ipa3_add_rt_rule_after_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size,
+			kptr + i * sizeof(struct ipa_rt_rule_add_i),
+			((struct ipa_ioc_add_rt_rule_after_v2 *)
+			header)->rule_add_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_mdfy_rt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->num_rules;
+	if (unlikely(((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size >
+		sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rule_mdfy_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_rt_rule_mdfy_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
+			(void *)param + i *
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size,
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_mdfy_rt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_mdfy_rt_rule_v2((struct ipa_ioc_mdfy_rt_rule_v2 *)
+		header)) {
+		IPAERR_RL("ipa3_mdfy_rt_rule_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size,
+			kptr + i * sizeof(struct ipa_rt_rule_mdfy_i),
+			((struct ipa_ioc_mdfy_rt_rule_v2 *)
+			header)->rule_mdfy_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_flt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_flt_rule_v2 *)header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size >
+		sizeof(struct ipa_flt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->flt_rule_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_flt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size,
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_flt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_flt_rule_usr_v2((struct ipa_ioc_add_flt_rule_v2 *)
+			header, true)) {
+		IPAERR_RL("ipa3_add_flt_rule_usr_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size,
+			kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			((struct ipa_ioc_add_flt_rule_v2 *)
+			header)->flt_rule_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_add_flt_rule_after_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_add_flt_rule_after_v2 *)
+		 header)->num_rules;
+	if (unlikely(((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size >
+		sizeof(struct ipa_flt_rule_add_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->flt_rule_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_add_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_add_flt_rule_after_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			(void *)param + i *
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size,
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_add_flt_rule_after_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_add_flt_rule_after_v2(
+		(struct ipa_ioc_add_flt_rule_after_v2 *)header)) {
+		IPAERR_RL("ipa3_add_flt_rule_after_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size,
+			kptr + i * sizeof(struct ipa_flt_rule_add_i),
+			((struct ipa_ioc_add_flt_rule_after_v2 *)
+			header)->flt_rule_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_mdfy_flt_rule_v2))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		 header)->num_rules;
+	if (unlikely(((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size >
+		sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("unexpected rule_add_size %d\n",
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rule_mdfy_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rule_mdfy_i)
+		* pre_entry;
+	uptr = ((struct ipa_ioc_mdfy_flt_rule_v2 *)
+		header)->rules;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
+			(void *)param + i *
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size,
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_mdfy_flt_rule_v2 *)header)->rules =
+		(u64)kptr;
+	if (ipa3_mdfy_flt_rule_v2
+		((struct ipa_ioc_mdfy_flt_rule_v2 *)header)) {
+		IPAERR_RL("ipa3_mdfy_flt_rule_v2 fails\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size,
+			kptr + i * sizeof(struct ipa_flt_rule_mdfy_i),
+			((struct ipa_ioc_mdfy_flt_rule_v2 *)
+			header)->rule_mdfy_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg)
+{
+	int retval = 0;
+	u8 header[128] = { 0 };
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
+		IPAERR("copy_from_user fails\n");
+		return -EFAULT;
+	}
+	if (((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hw_counter.num_counters >
+		IPA_FLT_RT_HW_COUNTER ||
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->sw_counter.num_counters >
+		IPA_FLT_RT_SW_COUNTER) {
+		IPAERR("failed: wrong sw/hw num_counters\n");
+		return -EPERM;
+	}
+	if (((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hw_counter.num_counters == 0 &&
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->sw_counter.num_counters == 0) {
+		IPAERR("failed: both sw/hw num_counters 0\n");
+		return -EPERM;
+	}
+	retval = ipa3_alloc_counter_id
+		((struct ipa_ioc_flt_rt_counter_alloc *)header);
+	if (retval < 0) {
+		IPAERR("ipa3_alloc_counter_id failed\n");
+		return retval;
+	}
+	if (copy_to_user((void __user *)arg, header,
+		sizeof(struct ipa_ioc_flt_rt_counter_alloc))) {
+		IPAERR("copy_to_user fails\n");
+		ipa3_counter_remove_hdl(
+		((struct ipa_ioc_flt_rt_counter_alloc *)
+		header)->hdl);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
+{
+	int retval = 0;
+	int i;
+	u8 header[128] = { 0 };
+	int pre_entry;
+	u32 usr_pyld_sz;
+	u32 pyld_sz;
+	u64 uptr = 0;
+	u8 *param = NULL;
+	u8 *kptr = NULL;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_flt_rt_query))) {
+		IPAERR_RL("copy_from_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	pre_entry =
+		((struct ipa_ioc_flt_rt_query *)
+		header)->end_id - ((struct ipa_ioc_flt_rt_query *)
+		header)->start_id + 1;
+	if (pre_entry <= 0 || pre_entry > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("IPA_IOC_FNR_COUNTER_QUERY failed: num %d\n",
+			pre_entry);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	if (((struct ipa_ioc_flt_rt_query *)header)->stats_size
+		> sizeof(struct ipa_flt_rt_stats)) {
+		IPAERR_RL("unexpected stats_size %d\n",
+		((struct ipa_ioc_flt_rt_query *)header)->stats_size);
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* user payload size */
+	usr_pyld_sz = ((struct ipa_ioc_flt_rt_query *)
+		header)->stats_size * pre_entry;
+	/* actual payload structure size in kernel */
+	pyld_sz = sizeof(struct ipa_flt_rt_stats) * pre_entry;
+	uptr = ((struct ipa_ioc_flt_rt_query *)
+		header)->stats;
+	if (unlikely(!uptr)) {
+		IPAERR_RL("unexpected NULL rules\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	/* alloc param with same payload size as user payload */
+	param = memdup_user((const void __user *)uptr,
+		usr_pyld_sz);
+	if (IS_ERR(param)) {
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+	/* alloc kernel pointer with actual payload size */
+	kptr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!kptr) {
+		retval = -ENOMEM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy(kptr + i * sizeof(struct ipa_flt_rt_stats),
+			(void *)param + i *
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size,
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size);
+	/* modify the rule pointer to the kernel pointer */
+	((struct ipa_ioc_flt_rt_query *)
+		header)->stats = (u64)kptr;
+	retval = ipa_get_flt_rt_stats
+		((struct ipa_ioc_flt_rt_query *)header);
+	if (retval < 0) {
+		IPAERR("ipa_get_flt_rt_stats failed\n");
+		retval = -EPERM;
+		goto free_param_kptr;
+	}
+	for (i = 0; i < pre_entry; i++)
+		memcpy((void *)param + i *
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size,
+			kptr + i * sizeof(struct ipa_flt_rt_stats),
+			((struct ipa_ioc_flt_rt_query *)
+			header)->stats_size);
+	if (copy_to_user((void __user *)uptr, param,
+		usr_pyld_sz)) {
+		IPAERR_RL("copy_to_user fails\n");
+		retval = -EFAULT;
+		goto free_param_kptr;
+	}
+
+free_param_kptr:
+	if (!IS_ERR(param))
+		kfree(param);
+	kfree(kptr);
+
+	return retval;
+}
+
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg)
+{
+	u8 header[128] = { 0 };
+	uint8_t value;
+
+	if (copy_from_user(header, (const void __user *)arg,
+		sizeof(struct ipa_ioc_fnr_index_info))) {
+		IPAERR_RL("copy_from_user fails\n");
+		return -EFAULT;
+	}
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->hw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("hw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+
+	ipa3_ctx->fnr_info.hw_counter_offset = value;
+
+	value = ((struct ipa_ioc_fnr_index_info *)
+		header)->sw_counter_offset;
+	if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("sw_counter_offset failed: num %d\n",
+			value);
+		return -EPERM;
+	}
+	ipa3_ctx->fnr_info.sw_counter_offset = value;
+	/* reset when ipacm-cleanup */
+	ipa3_ctx->fnr_info.valid = true;
+	IPADBG("fnr_info hw=%d, hw=%d\n",
+		ipa3_ctx->fnr_info.hw_counter_offset,
+		ipa3_ctx->fnr_info.sw_counter_offset);
+	return 0;
+}
+
+static int proc_sram_info_rqst(
+	unsigned long arg)
+{
+	struct ipa_nat_in_sram_info sram_info = { 0 };
+
+	if (ipa3_nat_get_sram_info(&sram_info))
+		return -EFAULT;
+
+	if (copy_to_user(
+		(void __user *) arg,
+		&sram_info,
+		sizeof(struct ipa_nat_in_sram_info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	bool is_vlan_mode;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_ipv6ct_init ipv6ct_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+	struct ipa_ioc_nat_ipv6ct_table_del table_del;
+	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
+	struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
+	struct ipa_ioc_get_vlan_mode vlan_mode;
+	struct ipa_ioc_wigig_fst_switch fst_switch;
+	size_t sz;
+	int pre_entry;
+	int hdl;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+
+	if (!ipa3_is_ready()) {
+		IPAERR("IPA not ready, waiting for init completion\n");
+		wait_for_completion(&ipa3_ctx->init_completion_obj);
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user(&nat_mem, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa3_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, &nat_mem,
+			sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_ALLOC_NAT_TABLE:
+		if (copy_from_user(&table_alloc, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_allocate_nat_table(&table_alloc)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (table_alloc.offset &&
+			copy_to_user((void __user *)arg, &table_alloc, sizeof(
+				struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ALLOC_IPV6CT_TABLE:
+		if (copy_from_user(&table_alloc, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_allocate_ipv6ct_table(&table_alloc)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (table_alloc.offset &&
+			copy_to_user((void __user *)arg, &table_alloc, sizeof(
+				struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user(&nat_init, (const void __user *)arg,
+			sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_INIT_IPV6CT_TABLE:
+		if (copy_from_user(&ipv6ct_init, (const void __user *)arg,
+			sizeof(struct ipa_ioc_ipv6ct_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_TABLE_DMA_CMD:
+		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header;
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry = table_dma_cmd->entries;
+		pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
+			pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param;
+		/* add check in case user-space module is compromised */
+		if (unlikely(table_dma_cmd->entries != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				table_dma_cmd->entries, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_table_dma_cmd(table_dma_cmd)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
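+
+	/*
+	 * Note on the "current vs pre" check above: the entry count is
+	 * fetched twice from user memory (once via the fixed-size header,
+	 * once via the full memdup_user() copy), so the command is
+	 * rejected if a racing user thread changed the count in between
+	 * (a double-fetch/TOCTOU guard). The same check repeats in the
+	 * variable-length cases below.
+	 */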
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user(&nat_del, (const void __user *)arg,
+			sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_NAT_TABLE:
+		if (copy_from_user(&table_del, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_del_nat_table(&table_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_IPV6CT_TABLE:
+		if (copy_from_user(&table_del, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_del_ipv6ct_table(&table_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_MODIFY_PDN:
+		if (copy_from_user(&mdfy_pdn, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_pdn_entry))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* add check in case user-space module is compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
+			true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_del);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* add check in case user-space module is compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
+			true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* add check in case user-space module is compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
+				true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_EXT:
+		if (copy_from_user(header,
+				(const void __user *)arg,
+				sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_ext) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(
+			((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR(" prevent memory corruption(%d not match %d)\n",
+				((struct ipa_ioc_add_rt_rule_ext *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EINVAL;
+			break;
+		}
+		if (ipa3_add_rt_rule_ext(
+			(struct ipa_ioc_add_rt_rule_ext *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_AFTER:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule_after))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_after) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule_after(
+			(struct ipa_ioc_add_rt_rule_after *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_del);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
+				true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_AFTER:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_flt_rule_after))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule_after *)header)->
+			num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule_after) +
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule_after(
+				(struct ipa_ioc_add_flt_rule_after *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_del);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_del_flt_rule *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa3_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa3_reset_hdr(false);
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa3_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa3_reset_rt(arg, false);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa3_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa3_reset_flt(arg, false);
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa3_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+			sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa3_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa3_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+			sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+			sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+		if (copy_from_user(header, (const void __user *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
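+		/* bound the user-provided count before it sizes the copy */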
+		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+			> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_tx_props *)
+			header)->num_tx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_tx_intf_prop);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+			param)->num_tx_props
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_tx_props *)
+				param)->num_tx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_tx_props(
+			(struct ipa_ioc_query_intf_tx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+		if (copy_from_user(header, (const void __user *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+			> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_rx_props *)
+			header)->num_rx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_rx_intf_prop);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+			param)->num_rx_props != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_rx_props *)
+				param)->num_rx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_rx_props(
+			(struct ipa_ioc_query_intf_rx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+		if (copy_from_user(header, (const void __user *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_ext_intf_prop);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+			param)->num_ext_props != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_ext_props *)
+				param)->num_ext_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_ext_props(
+			(struct ipa_ioc_query_intf_ext_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PULL_MSG:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_msg_meta))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+		   ((struct ipa_msg_meta *)header)->msg_len;
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   pre_entry;
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+			!= pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_msg_meta *)param)->msg_len,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
+			(char *)param + sizeof(struct ipa_msg_meta),
+			((struct ipa_msg_meta *)param)->msg_len) !=
+			((struct ipa_msg_meta *)param)->msg_len) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY:
+		/* IPA RM is deprecated because IPA PM is used */
+		IPAERR("using obsolete command: IPA_IOC_RM_ADD_DEPENDENCY");
+		return -EINVAL;
+
+	case IPA_IOC_RM_DEL_DEPENDENCY:
+		/* IPA RM is deprecated because IPA PM is used */
+		IPAERR("using obsolete command: IPA_IOC_RM_DEL_DEPENDENCY");
+		return -EINVAL;
+
+	case IPA_IOC_GENERATE_FLT_EQ:
+		{
+			struct ipa_ioc_generate_flt_eq flt_eq;
+
+			if (copy_from_user(&flt_eq, (const void __user *)arg,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (ipahal_flt_generate_equation(flt_eq.ip,
+				&flt_eq.attrib, &flt_eq.eq_attrib)) {
+				retval = -EFAULT;
+				break;
+			}
+			if (copy_to_user((void __user *)arg, &flt_eq,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			break;
+		}
+	case IPA_IOC_QUERY_EP_MAPPING:
+		{
+			retval = ipa3_get_ep_mapping(arg);
+			break;
+		}
+	case IPA_IOC_QUERY_RT_TBL_INDEX:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_rt_index(
+			(struct ipa_ioc_get_rt_tbl_indx *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+			sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_WRITE_QMAPID:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, header,
+			sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+		retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_HDR_PROC_CTX:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr_proc_ctx *)
+			header)->num_proc_ctxs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+			param)->num_proc_ctxs != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr_proc_ctx *)
+				param)->num_proc_ctxs, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr_proc_ctx(
+			(struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_HDR_PROC_CTX:
+		if (copy_from_user(header, (const void __user *)arg,
+			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
+		param = memdup_user((const void __user *)arg, pyld_sz);
+		if (IS_ERR(param)) {
+			retval = PTR_ERR(param);
+			break;
+		}
+		/* re-check the count in case user space was compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+			param)->num_hdls != pre_entry)) {
+			IPAERR_RL("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr_proc_ctx_by_user(
+			(struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GET_HW_VERSION:
+		pyld_sz = sizeof(enum ipa_hw_type);
+		param = kmemdup(&ipa3_ctx->ipa_hw_type, pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GET_VLAN_MODE:
+		if (copy_from_user(&vlan_mode, (const void __user *)arg,
+			sizeof(struct ipa_ioc_get_vlan_mode))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_is_vlan_mode(
+			vlan_mode.iface,
+			&is_vlan_mode);
+		if (retval)
+			break;
+
+		vlan_mode.is_vlan_mode = is_vlan_mode;
+
+		if (copy_to_user((void __user *)arg,
+			&vlan_mode,
+			sizeof(struct ipa_ioc_get_vlan_mode))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_VLAN_IFACE:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_VLAN_IFACE:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
+		if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_CLEANUP:
+		/* Route and filter rules will also be cleaned */
+		IPADBG("Got IPA_IOC_CLEANUP\n");
+		retval = ipa3_reset_hdr(true);
+		memset(&nat_del, 0, sizeof(nat_del));
+		nat_del.table_index = 0;
+		retval = ipa3_nat_del_cmd(&nat_del);
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			retval = ipa3_clean_mhip_dl_rule();
+		else
+			retval = ipa3_clean_modem_rule();
+		ipa3_counter_id_remove_all();
+		break;
+
+	case IPA_IOC_QUERY_WLAN_CLIENT:
+		IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
+		retval = ipa3_resend_wlan_msg();
+		break;
+
+	case IPA_IOC_GSB_CONNECT:
+		IPADBG("Got IPA_IOC_GSB_CONNECT\n");
+		if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GSB_DISCONNECT:
+		IPADBG("Got IPA_IOC_GSB_DISCONNECT\n");
+		if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_V2:
+		retval = ipa3_ioctl_add_rt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_EXT_V2:
+		retval = ipa3_ioctl_add_rt_rule_ext_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_RT_RULE_AFTER_V2:
+		retval = ipa3_ioctl_add_rt_rule_after_v2(arg);
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE_V2:
+		retval = ipa3_ioctl_mdfy_rt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_V2:
+		retval = ipa3_ioctl_add_flt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_AFTER_V2:
+		retval = ipa3_ioctl_add_flt_rule_after_v2(arg);
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE_V2:
+		retval = ipa3_ioctl_mdfy_flt_rule_v2(arg);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_ALLOC:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_ioctl_fnr_counter_alloc(arg);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_DEALLOC:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				 ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
+		hdl = (int)arg;
+		if (hdl < 0) {
+			IPAERR("IPA_FNR_COUNTER_DEALLOC failed: hdl %d\n",
+				hdl);
+			retval = -EPERM;
+			break;
+		}
+		ipa3_counter_remove_hdl(hdl);
+		break;
+
+	case IPA_IOC_FNR_COUNTER_QUERY:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_ioctl_fnr_counter_query(arg);
+		break;
+
+	case IPA_IOC_SET_FNR_COUNTER_INFO:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+			IPAERR("FNR stats not supported on IPA ver %d",
+				ipa3_ctx->ipa_hw_type);
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa3_ioctl_fnr_counter_set(arg);
+		break;
+
+	case IPA_IOC_WIGIG_FST_SWITCH:
+		IPADBG("Got IPA_IOCTL_WIGIG_FST_SWITCH\n");
+		if (copy_from_user(&fst_switch, (const void __user *)arg,
+			sizeof(struct ipa_ioc_wigig_fst_switch))) {
+			retval = -EFAULT;
+			break;
+		}
+
+		/* null terminate the string */
+		fst_switch.netdev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		retval = ipa_wigig_send_msg(WIGIG_FST_SWITCH,
+			fst_switch.netdev_name,
+			fst_switch.client_mac_addr,
+			IPA_CLIENT_MAX,
+			fst_switch.to_wigig);
+		break;
+
+	case IPA_IOC_GET_NAT_IN_SRAM_INFO:
+		retval = proc_sram_info_rqst(arg);
+		break;
+
+	case IPA_IOC_APP_CLOCK_VOTE:
+		retval = ipa3_app_clk_vote(
+			(enum ipa_app_clock_vote_type) arg);
+		break;
+
+	default:
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -ENOTTY;
+	}
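+	/* param may be an ERR_PTR from memdup_user; only free real buffers */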
+	if (!IS_ERR(param))
+		kfree(param);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return retval;
+}
+
+/**
+ * ipa3_setup_dflt_rt_tables() - Setup default routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+int ipa3_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+		kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule)
+		return -ENOMEM;
+
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+		IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
+	rt_rule_entry->rule.retain_hdr = 1;
+
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * because these tables are the very first to be added, they will both
+	 * have the same index (0) which is essential for programming the
+	 * "route" end-point config
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
+static int ipa3_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipahal_reg_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr)
+		return -ENOMEM;
+
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+	if (ipa3_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* set the route register to pass exception packets to Apps */
+	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	route.route_frag_def_pipe = ipa3_get_ep_mapping(
+		IPA_CLIENT_APPS_LAN_CONS);
+	route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
+	route.route_def_retain_hdr = 1;
+
+	if (ipa3_cfg_route(&route)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static int ipa3_init_smem_region(int memory_region_size,
+				int memory_region_offset)
+{
+	struct ipahal_imm_cmd_dma_shared_mem cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	int rc;
+
+	if (memory_region_size == 0)
+		return 0;
+
+	memset(&desc, 0, sizeof(desc));
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&mem, 0, sizeof(mem));
+
+	mem.size = memory_region_size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
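+	/* DMA the zero-filled buffer into local SRAM to clear the region */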
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		memory_region_offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		return -ENOMEM;
+	}
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+
+	rc = ipa3_send_cmd(1, &desc);
+	if (rc) {
+		IPAERR("failed to send immediate command (error %d)\n", rc);
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+		mem.phys_base);
+
+	return rc;
+}
+
+/**
+ * ipa3_init_q6_smem() - Initialize Q6 general memory and
+ *                      header memory regions in IPA.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate dma memory
+ * -EFAULT: failed to send IPA command to initialize the memory
+ */
+int ipa3_init_q6_smem(void)
+{
+	int rc;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
+		IPA_MEM_PART(modem_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+		IPA_MEM_PART(modem_hdr_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem HDRs RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+		IPA_MEM_PART(modem_comp_decomp_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return rc;
+}
+
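+/* desc completion callback: user1 holds the imm cmd payload to destroy */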
+static void ipa3_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_q6_pipe_delay(bool delay)
+{
+	int client_idx;
+	int ep_idx;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = delay;
+
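+	/* apply the delay setting to every Q6 producer endpoint */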
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_ctrl);
+		}
+	}
+}
+
+static void ipa3_q6_avoid_holb(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa_ep_cfg_ctrl ep_suspend;
+	struct ipa_ep_cfg_holb ep_holb;
+
+	memset(&ep_suspend, 0, sizeof(ep_suspend));
+	memset(&ep_holb, 0, sizeof(ep_holb));
+
+	ep_suspend.ipa_ep_suspend = true;
+	ep_holb.tmr_val = 0;
+	ep_holb.en = 1;
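+	/* timer value 0 with HOLB enabled drops packets immediately */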
+
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2)
+		ipa3_cal_ep_holb_scale_base_val(ep_holb.tmr_val, &ep_holb);
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			/* from IPA 4.0 pipe suspend is not supported */
+			if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+				ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_suspend);
+
+			/*
+			 * ipa3_cfg_ep_holb is not used here because we are
+			 * setting HOLB on Q6 pipes, and from APPS perspective
+			 * they are not valid, therefore, the above function
+			 * will fail.
+			 */
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+				ep_idx, &ep_holb);
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+				ep_idx, &ep_holb);
+
+			/* IPA4.5 issue requires HOLB_EN to be written twice */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+				ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					ep_idx, &ep_holb);
+		}
+	}
+}
+
+static void ipa3_halt_q6_gsi_channels(bool prod)
+{
+	int ep_idx;
+	int client_idx;
+	const struct ipa_gsi_ep_config *gsi_ep_cfg;
+	int i;
+	int ret;
+	int code = 0;
+
+	/* if prod flag is true, then we halt the producer channels also */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_CONS(client_idx)
+			|| (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
+			if (!gsi_ep_cfg) {
+				IPAERR("failed to get GSI config\n");
+				ipa_assert();
+				return;
+			}
+
+			ret = gsi_halt_channel_ee(
+				gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
+				&code);
+			for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
+				ret == -GSI_STATUS_AGAIN; i++) {
+				IPADBG(
+				"ch %d ee %d code %d is busy, retrying\n",
+					gsi_ep_cfg->ipa_gsi_chan_num,
+					gsi_ep_cfg->ee,
+					code);
+				usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
+					IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
+				ret = gsi_halt_channel_ee(
+					gsi_ep_cfg->ipa_gsi_chan_num,
+					gsi_ep_cfg->ee, &code);
+			}
+			if (ret == GSI_STATUS_SUCCESS)
+				IPADBG("halted gsi ch %d ee %d with code %d\n",
+				gsi_ep_cfg->ipa_gsi_chan_num,
+				gsi_ep_cfg->ee,
+				code);
+			else
+				IPAERR("failed to halt ch %d ee %d code %d\n",
+				gsi_ep_cfg->ipa_gsi_chan_num,
+				gsi_ep_cfg->ee,
+				code);
+		}
+	}
+}
+
+static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int retval = 0;
+	int pipe_idx;
+	int flt_idx = 0;
+	int num_cmds = 0;
+	int index;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	/*
+	 * SRAM memory is not allocated for hash tables, so cleaning the
+	 * hash tables is not supported.
+	 */
+	if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) {
+		IPADBG("Clean hashable rules not supported\n");
+		return retval;
+	}
+
+	/* one descriptor per filtering pipe, plus one for the coal close IC */
+	desc = kcalloc(ipa3_ctx->ep_flt_num + 1, sizeof(struct ipa3_desc),
+		GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num + 1,
+		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+
+	if (ip == IPA_IP_v4) {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
+		0, &mem, true);
+	if (retval) {
+		IPAERR("failed to generate flt single tbl empty img\n");
+		goto free_cmd_pyld;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_empty_img;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		/*
+		 * Iterating over all the filtering pipes which are either
+		 * invalid but connected or connected but not configured by AP.
+		 */
+		if (!ipa3_ctx->ep[pipe_idx].valid ||
+		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
+
+			if (num_cmds >= ipa3_ctx->ep_flt_num) {
+				IPAERR("number of commands is out of range\n");
+				retval = -ENOBUFS;
+				goto free_empty_img;
+			}
+
+			cmd.is_read = false;
+			cmd.skip_pipeline_clear = false;
+			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			cmd.size = mem.size;
+			cmd.system_addr = mem.phys_base;
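+			/* per-pipe entries start one hdr width past the base */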
+			cmd.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				lcl_addr_mem_part +
+				ipahal_get_hw_tbl_hdr_width() +
+				flt_idx * ipahal_get_hw_tbl_hdr_width();
+			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+			if (!cmd_pyld[num_cmds]) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				retval = -ENOMEM;
+				goto free_empty_img;
+			}
+			ipa3_init_imm_cmd_desc(&desc[num_cmds],
+				cmd_pyld[num_cmds]);
+			++num_cmds;
+		}
+
+		++flt_idx;
+	}
+
+	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
+	retval = ipa3_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+free_cmd_pyld:
+	for (index = 0; index < num_cmds; index++)
+		ipahal_destroy_imm_cmd(cmd_pyld[index]);
+	kfree(cmd_pyld);
+free_desc:
+	kfree(desc);
+	return retval;
+}
+
+static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int retval = 0;
+	int num_cmds = 0;
+	u32 modem_rt_index_lo;
+	u32 modem_rt_index_hi;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	/*
+	 * SRAM memory is not allocated for hash tables, so cleaning the
+	 * hash tables is not supported.
+	 */
+	if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) {
+		IPADBG("Clean hashable rules not supported\n");
+		return retval;
+	}
+
+	if (ip == IPA_IP_v4) {
+		modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_rt_generate_empty_img(
+		modem_rt_index_hi - modem_rt_index_lo + 1,
+		lcl_hdr_sz, lcl_hdr_sz, &mem, true);
+	if (retval) {
+		IPAERR("fail generate empty rt img\n");
+		return -ENOMEM;
+	}
+
+	desc = kcalloc(2, sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		retval = -ENOMEM;
+		goto free_empty_img;
+	}
+
+	cmd_pyld = kcalloc(2, sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_cmd_pyld;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
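+	/* overwrite only the modem-owned rt table indices [lo..hi] */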
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		lcl_addr_mem_part +
+		modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld[num_cmds]) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		retval = -ENOMEM;
+		goto free_cmd_pyld;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
+
+	IPADBG("Sending 1 descriptor for rt tbl clearing\n");
+	retval = ipa3_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+free_cmd_pyld:
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+	kfree(cmd_pyld);
+free_desc:
+	kfree(desc);
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+	return retval;
+}
+
+static int ipa3_q6_clean_q6_tables(void)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	int retval = 0;
+	int num_cmds = 0;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
+
+	IPADBG("Entry\n");
+
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * SRAM memory is not allocated for hash tables, so cleaning the
+	 * hash tables is not supported.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable)
+		return retval;
+	/* Flush rules cache */
+	desc = kcalloc(2, sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	cmd_pyld = kcalloc(2, sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		retval = -ENOMEM;
+		goto bail_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			retval = -ENOMEM;
+			goto free_cmd_pyld;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
+	flush.v4_flt = true;
+	flush.v4_rt = true;
+	flush.v6_flt = true;
+	flush.v6_rt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[num_cmds]) {
+		IPAERR("fail construct register_write imm cmd\n");
+		retval = -EFAULT;
+		goto free_cmd_pyld;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
+
+	IPADBG("Sending 1 descriptor for tbls flush\n");
+	retval = ipa3_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+free_cmd_pyld:
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+	kfree(cmd_pyld);
+bail_desc:
+	kfree(desc);
+	IPADBG("Done - retval = %d\n", retval);
+	return retval;
+}
+
+static int ipa3_q6_set_ex_path_to_apps(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa3_desc *desc;
+	int num_descs = 0;
+	int index;
+	struct ipahal_imm_cmd_register_write reg_write;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int retval;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int i;
+
+	desc = kcalloc(ipa3_ctx->ipa_num_pipes + 1, sizeof(struct ipa3_desc),
+			GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ipa_assert();
+			return -ENOMEM;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
+		desc[num_descs].callback = ipa3_destroy_imm;
+		desc[num_descs].user1 = cmd_pyld;
+		++num_descs;
+	}
+
+	/* Set the exception path to AP */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		ep_idx = ipa3_get_ep_mapping(client_idx);
+		if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
+			continue;
+
+		/* disable statuses for all modem controlled prod pipes */
+		if (!IPA_CLIENT_IS_TEST(client_idx) &&
+			(IPA_CLIENT_IS_Q6_PROD(client_idx) ||
+			(IPA_CLIENT_IS_PROD(client_idx) &&
+			ipa3_ctx->ep[ep_idx].valid &&
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
+			(ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
+			&& ipa3_ctx->modem_cfg_emb_pipe_flt))) {
+			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+			ipa3_ctx->ep[ep_idx].status.status_en = false;
+			reg_write.skip_pipeline_clear = false;
+			reg_write.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write.offset =
+				ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
+					ep_idx);
+			reg_write.value = 0;
+			reg_write.value_mask = ~0;
+			cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+			if (!cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				ipa_assert();
+				return -ENOMEM;
+			}
+
+			ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
+			desc[num_descs].callback = ipa3_destroy_imm;
+			desc[num_descs].user1 = cmd_pyld;
+			++num_descs;
+		}
+	}
+
+	/* Will wait 500msecs for IPA tag process completion */
+	retval = ipa3_tag_process(desc, num_descs,
+		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+	if (retval) {
+		IPAERR("TAG process failed! (error %d)\n", retval);
+		/* For timeout error ipa3_destroy_imm cb will destroy user1 */
+		if (retval != -ETIME) {
+			for (index = 0; index < num_descs; index++)
+				if (desc[index].callback)
+					desc[index].callback(desc[index].user1,
+						desc[index].user2);
+			retval = -EINVAL;
+		}
+	}
+
+	kfree(desc);
+
+	return retval;
+}
+
+/*
+ * ipa3_update_ssr_state() - update the current SSR state
+ * @is_ssr:	[in] Current SSR state
+ */
+void ipa3_update_ssr_state(bool is_ssr)
+{
+	if (is_ssr)
+		atomic_set(&ipa3_ctx->is_ssr, 1);
+	else
+		atomic_set(&ipa3_ctx->is_ssr, 0);
+}
+
+/**
+ * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
+ *                    in IPA HW. This is performed in case of SSR.
+ *
+ * This is a mandatory procedure; if one of the steps fails, the
+ * AP needs to restart.
+ */
+void ipa3_q6_pre_shutdown_cleanup(void)
+{
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipa3_update_ssr_state(true);
+	if (!ipa3_ctx->ipa_endp_delay_wa)
+		ipa3_q6_pipe_delay(true);
+	ipa3_q6_avoid_holb();
+	if (ipa3_ctx->ipa_config_is_mhi)
+		ipa3_set_reset_client_cons_pipe_sus_holb(true,
+		IPA_CLIENT_MHI_CONS);
+	if (ipa3_q6_clean_q6_tables()) {
+		IPAERR("Failed to clean Q6 tables\n");
+		/*
+		 * Indicates IPA hardware is stalled, unexpected
+		 * hardware state.
+		 */
+		ipa_assert();
+	}
+	if (ipa3_q6_set_ex_path_to_apps()) {
+		IPAERR("Failed to redirect exceptions to APPS\n");
+		/*
+		 * Indicates IPA hardware is stalled, unexpected
+		 * hardware state.
+		 */
+		ipa_assert();
+	}
+	/* Remove delay from Q6 PRODs to avoid pending descriptors
+	 * on pipe reset procedure
+	 */
+	if (!ipa3_ctx->ipa_endp_delay_wa) {
+		ipa3_q6_pipe_delay(false);
+		ipa3_set_reset_client_prod_pipe_delay(true,
+			IPA_CLIENT_USB_PROD);
+	} else {
+		ipa3_start_stop_client_prod_gsi_chnl(IPA_CLIENT_USB_PROD,
+						false);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup,
+ * check that the GSI channels related to the Q6 producer clients are empty.
+ *
+ * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
+ * invalid info are injected into IPA RX from IPA_IF while the modem restarts.
+ */
+void ipa3_q6_post_shutdown_cleanup(void)
+{
+	int client_idx;
+	int ep_idx;
+	bool prod = false;
+
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* Handle the issue where SUSPEND was removed for some reason */
+	ipa3_q6_avoid_holb();
+
+	/* halt both prod and cons channels starting at IPAv4 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		prod = true;
+		ipa3_halt_q6_gsi_channels(prod);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		IPADBG("Exit without consumer check\n");
+		return;
+	}
+
+	ipa3_halt_q6_gsi_channels(prod);
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded. Skipping\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
+				IPAERR("fail to validate Q6 ch emptiness %d\n",
+					client_idx);
+				/*
+				 * Indicates GSI hardware is stalled, unexpected
+				 * hardware state.
+				 * The assert was removed here to avoid
+				 * an adb reboot issue.
+				 */
+			}
+		}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+/**
+ * ipa3_q6_pre_powerup_cleanup() - A cleanup routine for peripheral
+ * configuration in IPA HW. This is performed in case of SSR.
+ *
+ * This is a mandatory procedure; if one of the steps fails, the
+ * AP needs to restart.
+ */
+void ipa3_q6_pre_powerup_cleanup(void)
+{
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (ipa3_ctx->ipa_config_is_mhi)
+		ipa3_set_reset_client_prod_pipe_delay(true,
+			IPA_CLIENT_MHI_PROD);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_client_prod_post_shutdown_cleanup() - Set endpoint delay on the
+ * client producer pipes and start the corresponding GSI channels.
+ */
+void ipa3_client_prod_post_shutdown_cleanup(void)
+{
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipa3_set_reset_client_prod_pipe_delay(true,
+				IPA_CLIENT_USB_PROD);
+	ipa3_start_stop_client_prod_gsi_chnl(IPA_CLIENT_USB_PROD, true);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
+{
+	/* Set 4 bytes of CANARY before the offset */
+	sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+/**
+ * _ipa_init_sram_v3() - Initialize IPA local SRAM.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_sram_v3(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+
+	IPADBG(
+	    "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SW_AREA_RAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
+	    ipa3_ctx->ipa_wrapper_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipahal_get_reg_n_ofst(
+		IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+		ipa3_ctx->smem_restricted_bytes / 4),
+	    ipa3_ctx->smem_restricted_bytes,
+	    ipa3_ctx->smem_sz);
+
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4);
+
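+	/* map the SRAM window exposed through the direct-access registers */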
+	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	/* Consult with ipa_i.h on the location of the CANARY values */
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	if (ipa_get_hw_type() >= IPA_HW_v4_5) {
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(nat_tbl_ofst) - 12);
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(nat_tbl_ofst) - 8);
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(nat_tbl_ofst) - 4);
+		ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(nat_tbl_ofst));
+	}
+	if (ipa_get_hw_type() >= IPA_HW_v4_0) {
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(pdn_config_ofst) - 4);
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(pdn_config_ofst));
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(stats_quota_ofst) - 4);
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(stats_quota_ofst));
+	}
+	if (ipa_get_hw_type() <= IPA_HW_v3_5 ||
+		ipa_get_hw_type() >= IPA_HW_v4_5) {
+		ipa3_sram_set_canary(ipa_sram_mmio,
+			IPA_MEM_PART(modem_ofst) - 4);
+		ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+	}
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		(ipa_get_hw_type() >= IPA_HW_v3_5) ?
+			IPA_MEM_PART(uc_descriptor_ram_ofst) :
+			IPA_MEM_PART(end_ofst));
+
+	iounmap(ipa_sram_mmio);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_hdr_v3_0() - Initialize IPA header block.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_hdr_v3_0(void)
+{
+	struct ipa3_desc hdr_init_desc;
+	struct ipa3_desc dma_cmd_desc[2];
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
+	struct ipahal_imm_cmd_pyld *hdr_init_cmd_payload;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int num_cmds = 0;
+	int i;
+
+	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	cmd.hdr_table_addr = mem.phys_base;
+	cmd.size_hdr_table = mem.size;
+	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_ofst);
+	hdr_init_cmd_payload = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
+	if (!hdr_init_cmd_payload) {
+		IPAERR("fail to construct hdr_init_local imm cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	ipa3_init_imm_cmd_desc(&hdr_init_desc, hdr_init_cmd_payload);
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &hdr_init_desc)) {
+		IPAERR("fail to send immediate command\n");
+		ipahal_destroy_imm_cmd(hdr_init_cmd_payload);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(hdr_init_cmd_payload);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (i != -1) {
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmds]) {
+			IPAERR("failed to construct coal close IC\n");
+			return -ENOMEM;
+		}
+		ipa3_init_imm_cmd_desc(&dma_cmd_desc[num_cmds],
+			cmd_pyld[num_cmds]);
+		++num_cmds;
+	}
+
+	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+		IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		/* free any immediate commands already constructed above */
+		for (i = 0; i < num_cmds; i++)
+			ipahal_destroy_imm_cmd(cmd_pyld[i]);
+		return -ENOMEM;
+	}
+
+	dma_cmd.is_read = false;
+	dma_cmd.skip_pipeline_clear = false;
+	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	dma_cmd.system_addr = mem.phys_base;
+	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+	dma_cmd.size = mem.size;
+	cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
+	if (!cmd_pyld[num_cmds]) {
+		IPAERR("fail to construct dma_shared_mem imm\n");
+		/* also release any coal-close command constructed above */
+		for (i = 0; i < num_cmds; i++)
+			ipahal_destroy_imm_cmd(cmd_pyld[i]);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -ENOMEM;
+	}
+	ipa3_init_imm_cmd_desc(&dma_cmd_desc[num_cmds], cmd_pyld[num_cmds]);
+	++num_cmds;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(num_cmds, dma_cmd_desc)) {
+		IPAERR("fail to send immediate command\n");
+		for (i = 0; i < num_cmds; i++)
+			ipahal_destroy_imm_cmd(cmd_pyld[i]);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size,
+			mem.base,
+			mem.phys_base);
+		return -EBUSY;
+	}
+	for (i = 0; i < num_cmds; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
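+	/* point the HW at the header proc-ctx area just copied into SRAM */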
+	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt4_v3(void)
+{
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+	IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
+		IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
+		&mem, false);
+	if (rc) {
+		IPAERR("fail generate empty v4 rt img\n");
+		return rc;
+	}
+
+	/*
+	 * No SRAM is allocated for the hash tables, so initializing or
+	 * sending commands to the hash (filter/routing) tables is not
+	 * supported.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable) {
+		v4_cmd.hash_rules_addr = 0;
+		v4_cmd.hash_rules_size = 0;
+		v4_cmd.hash_local_addr = 0;
+	} else {
+		v4_cmd.hash_rules_addr = mem.phys_base;
+		v4_cmd.hash_rules_size = mem.size;
+		v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_hash_ofst);
+	}
+
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt6_v3(void)
+{
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+	IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
+		&mem, false);
+	if (rc) {
+		IPAERR("fail generate empty v6 rt img\n");
+		return rc;
+	}
+
+	/*
+	 * No SRAM is allocated for the hash tables, so initializing or
+	 * sending commands to the hash (filter/routing) tables is not
+	 * supported.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable) {
+		v6_cmd.hash_rules_addr = 0;
+		v6_cmd.hash_rules_size = 0;
+		v6_cmd.hash_local_addr = 0;
+	} else {
+		v6_cmd.hash_rules_addr = mem.phys_base;
+		v6_cmd.hash_rules_size = mem.size;
+		v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_hash_ofst);
+	}
+
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt4_v3(void)
+{
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem, false);
+	if (rc) {
+		IPAERR("fail generate empty v4 flt img\n");
+		return rc;
+	}
+
+	/*
+	 * No SRAM is allocated for the hash tables, so initializing or
+	 * sending commands to the hash (filter/routing) tables is not
+	 * supported.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable) {
+		v4_cmd.hash_rules_addr = 0;
+		v4_cmd.hash_rules_size = 0;
+		v4_cmd.hash_local_addr = 0;
+	} else {
+		v4_cmd.hash_rules_addr = mem.phys_base;
+		v4_cmd.hash_rules_size = mem.size;
+		v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_hash_ofst);
+	}
+
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt6_v3(void)
+{
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem, false);
+	if (rc) {
+		IPAERR("fail generate empty v6 flt img\n");
+		return rc;
+	}
+
+	/*
+	 * No SRAM is allocated for the hash tables, so initializing or
+	 * sending commands to the hash (filter/routing) tables is not
+	 * supported.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable) {
+		v6_cmd.hash_rules_addr = 0;
+		v6_cmd.hash_rules_size = 0;
+		v6_cmd.hash_local_addr = 0;
+	} else {
+		v6_cmd.hash_rules_addr = mem.phys_base;
+		v6_cmd.hash_rules_size = mem.size;
+		v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_hash_ofst);
+	}
+
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+static int ipa3_setup_flt_hash_tuple(void)
+{
+	int pipe_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		if (ipa_is_modem_pipe(pipe_idx))
+			continue;
+
+		if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
+			IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
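+
+/*
+ * Note: the tuple above is zero-initialized, so this clears the hash
+ * tuple mask on every AP-owned filtering pipe; modem pipes are skipped
+ * and left for the modem to configure.
+ */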
+
+static int ipa3_setup_rt_hash_tuple(void)
+{
+	int tbl_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
+	for (tbl_idx = 0;
+		tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index));
+		tbl_idx++) {
+
+		if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
+			continue;
+
+		if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
+			continue;
+
+		if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
+			IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ipa3_setup_apps_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	if (ipa3_ctx->gsi_ch20_wa) {
+		IPADBG("Allocating GSI physical channel 20\n");
+		result = ipa_gsi_ch20_wa();
+		if (result) {
+			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
+			goto fail_ch20_wa;
+		}
+	}
+
+	/* allocate the common PROD event ring */
+	if (ipa3_alloc_common_event_ring()) {
+		IPAERR("ipa3_alloc_common_event_ring failed.\n");
+		result = -EPERM;
+		goto fail_ch20_wa;
+	}
+
+	/* CMD OUT (AP->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
+		result = -EPERM;
+		goto fail_ch20_wa;
+	}
+	IPADBG("Apps to IPA cmd pipe is connected\n");
+
+	IPADBG("Will initialize SRAM\n");
+	ipa3_ctx->ctrl->ipa_init_sram();
+	IPADBG("SRAM initialized\n");
+
+	IPADBG("Will initialize HDR\n");
+	ipa3_ctx->ctrl->ipa_init_hdr();
+	IPADBG("HDR initialized\n");
+
+	IPADBG("Will initialize V4 RT\n");
+	ipa3_ctx->ctrl->ipa_init_rt4();
+	IPADBG("V4 RT initialized\n");
+
+	IPADBG("Will initialize V6 RT\n");
+	ipa3_ctx->ctrl->ipa_init_rt6();
+	IPADBG("V6 RT initialized\n");
+
+	IPADBG("Will initialize V4 FLT\n");
+	ipa3_ctx->ctrl->ipa_init_flt4();
+	IPADBG("V4 FLT initialized\n");
+
+	IPADBG("Will initialize V6 FLT\n");
+	ipa3_ctx->ctrl->ipa_init_flt6();
+	IPADBG("V6 FLT initialized\n");
+
+	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
+		if (ipa3_setup_flt_hash_tuple()) {
+			IPAERR(":fail to configure flt hash tuple\n");
+			result = -EPERM;
+			goto fail_flt_hash_tuple;
+		}
+		IPADBG("flt hash tuple is configured\n");
+
+		if (ipa3_setup_rt_hash_tuple()) {
+			IPAERR(":fail to configure rt hash tuple\n");
+			result = -EPERM;
+			goto fail_flt_hash_tuple;
+		}
+		IPADBG("rt hash tuple is configured\n");
+	}
+	if (ipa3_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_flt_hash_tuple;
+	}
+	IPADBG("Exception path was successfully set\n");
+
+	if (ipa3_setup_dflt_rt_tables()) {
+		IPAERR(":fail to setup dflt routes\n");
+		result = -EPERM;
+		goto fail_flt_hash_tuple;
+	}
+	IPADBG("default routing was set\n");
+
+	/* LAN IN (IPA->AP) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.notify = ipa3_lan_rx_cb;
+	sys_in.priv = NULL;
+	if (ipa3_ctx->lan_rx_napi_enable)
+		sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
+
+	/*
+	 * ipa_lan_rx_cb() notifies the source EP about a packet received on
+	 * LAN_CONS by invoking the source EP's callback. Another thread may
+	 * nullify that callback concurrently - e.g. on EP disconnect - so
+	 * this lock protects access to the source EP callback.
+	 */
+	spin_lock_init(&ipa3_ctx->disconnect_lock);
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
+		result = -EPERM;
+		goto fail_flt_hash_tuple;
+	}
+
+	/* LAN OUT (AP->IPA) */
+	if (!ipa3_ctx->ipa_config_is_mhi) {
+		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
+		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+		if (ipa3_setup_sys_pipe(&sys_in,
+			&ipa3_ctx->clnt_hdl_data_out)) {
+			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
+			result = -EPERM;
+			goto fail_lan_data_out;
+		}
+	}
+
+	return 0;
+
+fail_lan_data_out:
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+fail_flt_hash_tuple:
+	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	if (ipa3_ctx->excp_hdr_hdl)
+		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+fail_ch20_wa:
+	return result;
+}
+
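+/* Teardown mirrors ipa3_setup_apps_pipes(), matching its error-unwind order */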
+static void ipa3_teardown_apps_pipes(void)
+{
+	if (!ipa3_ctx->ipa_config_is_mhi)
+		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+
+static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg,
+	int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *))
+{
+	long retval;
+	struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32;
+	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
+
+	retval = copy_from_user(&table_alloc32, (const void __user *)arg,
+		sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
+	if (retval)
+		return retval;
+
+	table_alloc.size = (size_t)table_alloc32.size;
+	table_alloc.offset = (off_t)table_alloc32.offset;
+
+	retval = alloc_func(&table_alloc);
+	if (retval)
+		return retval;
+
+	if (table_alloc.offset) {
+		table_alloc32.offset = (compat_off_t)table_alloc.offset;
+		retval = copy_to_user((void __user *)arg, &table_alloc32,
+			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
+	}
+
+	return retval;
+}
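+
+/*
+ * The helper above follows the usual compat-ioctl pattern: copy the
+ * 32-bit layout in, widen size/offset to the native types, invoke the
+ * allocator, then copy the (possibly updated) offset back as compat_off_t.
+ */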
+
+long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long retval = 0;
+	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+
+	switch (cmd) {
+	case IPA_IOC_ADD_HDR32:
+		cmd = IPA_IOC_ADD_HDR;
+		break;
+	case IPA_IOC_DEL_HDR32:
+		cmd = IPA_IOC_DEL_HDR;
+		break;
+	case IPA_IOC_ADD_RT_RULE32:
+		cmd = IPA_IOC_ADD_RT_RULE;
+		break;
+	case IPA_IOC_DEL_RT_RULE32:
+		cmd = IPA_IOC_DEL_RT_RULE;
+		break;
+	case IPA_IOC_ADD_FLT_RULE32:
+		cmd = IPA_IOC_ADD_FLT_RULE;
+		break;
+	case IPA_IOC_DEL_FLT_RULE32:
+		cmd = IPA_IOC_DEL_FLT_RULE;
+		break;
+	case IPA_IOC_GET_RT_TBL32:
+		cmd = IPA_IOC_GET_RT_TBL;
+		break;
+	case IPA_IOC_COPY_HDR32:
+		cmd = IPA_IOC_COPY_HDR;
+		break;
+	case IPA_IOC_QUERY_INTF32:
+		cmd = IPA_IOC_QUERY_INTF;
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+		break;
+	case IPA_IOC_GET_HDR32:
+		cmd = IPA_IOC_GET_HDR;
+		break;
+	case IPA_IOC_ALLOC_NAT_MEM32:
+		retval = copy_from_user(&nat_mem32, (const void __user *)arg,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32));
+		if (retval)
+			return retval;
+		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+				IPA_RESOURCE_NAME_MAX);
+		nat_mem.size = (size_t)nat_mem32.size;
+		nat_mem.offset = (off_t)nat_mem32.offset;
+
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		retval = ipa3_allocate_nat_device(&nat_mem);
+		if (retval)
+			return retval;
+		nat_mem32.offset = (compat_off_t)nat_mem.offset;
+		retval = copy_to_user((void __user *)arg, &nat_mem32,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32));
+		return retval;
+	case IPA_IOC_ALLOC_NAT_TABLE32:
+		return compat_ipa3_nat_ipv6ct_alloc_table(arg,
+			ipa3_allocate_nat_table);
+	case IPA_IOC_ALLOC_IPV6CT_TABLE32:
+		return compat_ipa3_nat_ipv6ct_alloc_table(arg,
+			ipa3_allocate_ipv6ct_table);
+	case IPA_IOC_V4_INIT_NAT32:
+		cmd = IPA_IOC_V4_INIT_NAT;
+		break;
+	case IPA_IOC_INIT_IPV6CT_TABLE32:
+		cmd = IPA_IOC_INIT_IPV6CT_TABLE;
+		break;
+	case IPA_IOC_TABLE_DMA_CMD32:
+		cmd = IPA_IOC_TABLE_DMA_CMD;
+		break;
+	case IPA_IOC_V4_DEL_NAT32:
+		cmd = IPA_IOC_V4_DEL_NAT;
+		break;
+	case IPA_IOC_DEL_NAT_TABLE32:
+		cmd = IPA_IOC_DEL_NAT_TABLE;
+		break;
+	case IPA_IOC_DEL_IPV6CT_TABLE32:
+		cmd = IPA_IOC_DEL_IPV6CT_TABLE;
+		break;
+	case IPA_IOC_NAT_MODIFY_PDN32:
+		cmd = IPA_IOC_NAT_MODIFY_PDN;
+		break;
+	case IPA_IOC_GET_NAT_OFFSET32:
+		cmd = IPA_IOC_GET_NAT_OFFSET;
+		break;
+	case IPA_IOC_PULL_MSG32:
+		cmd = IPA_IOC_PULL_MSG;
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY32:
+		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY32:
+		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ32:
+		cmd = IPA_IOC_GENERATE_FLT_EQ;
+		break;
+	case IPA_IOC_QUERY_RT_TBL_INDEX32:
+		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+		break;
+	case IPA_IOC_WRITE_QMAPID32:
+		cmd = IPA_IOC_WRITE_QMAPID;
+		break;
+	case IPA_IOC_MDFY_FLT_RULE32:
+		cmd = IPA_IOC_MDFY_FLT_RULE;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+		break;
+	case IPA_IOC_MDFY_RT_RULE32:
+		cmd = IPA_IOC_MDFY_RT_RULE;
+		break;
+	case IPA_IOC_GET_NAT_IN_SRAM_INFO32:
+		cmd = IPA_IOC_GET_NAT_IN_SRAM_INFO;
+		break;
+	case IPA_IOC_APP_CLOCK_VOTE32:
+		cmd = IPA_IOC_APP_CLOCK_VOTE;
+		break;
+	case IPA_IOC_COMMIT_HDR:
+	case IPA_IOC_RESET_HDR:
+	case IPA_IOC_COMMIT_RT:
+	case IPA_IOC_RESET_RT:
+	case IPA_IOC_COMMIT_FLT:
+	case IPA_IOC_RESET_FLT:
+	case IPA_IOC_DUMP:
+	case IPA_IOC_PUT_RT_TBL:
+	case IPA_IOC_PUT_HDR:
+	case IPA_IOC_SET_FLT:
+	case IPA_IOC_QUERY_EP_MAPPING:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos);
+
+static const struct file_operations ipa3_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_open,
+	.read = ipa3_read,
+	.write = ipa3_write,
+	.unlocked_ioctl = ipa3_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ipa3_ioctl,
+#endif
+};
+
+static int ipa3_get_clks(struct device *dev)
+{
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
+		IPADBG("not supported in this HW mode\n");
+		ipa3_clk = NULL;
+		return 0;
+	}
+
+	if (ipa3_res.use_bw_vote) {
+		IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
+		ipa3_clk = NULL;
+		return 0;
+	}
+
+	ipa3_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa3_clk)) {
+		if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
+			IPAERR("fail to get ipa clk\n");
+		return PTR_ERR(ipa3_clk);
+	}
+	return 0;
+}
+
+/**
+ * _ipa_enable_clks_v3_0() - Enable IPA clocks.
+ */
+void _ipa_enable_clks_v3_0(void)
+{
+	IPADBG_LOW("curr_ipa_clk_rate=%d\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (ipa3_clk) {
+		IPADBG_LOW("enabling gcc_ipa_clk\n");
+		clk_prepare(ipa3_clk);
+		clk_enable(ipa3_clk);
+		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+	}
+
+	ipa3_uc_notify_clk_state(true);
+}
+
+static unsigned int ipa3_get_bus_vote(void)
+{
+	unsigned int idx = 1;
+
+	if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
+		idx = 1;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+		ipa3_ctx->ctrl->ipa_clk_rate_svs) {
+		idx = 2;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+		ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
+		idx = 3;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+			ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
+		idx = 4;
+	} else {
+		WARN(1, "unexpected clock rate");
+	}
+	IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+
+	return idx;
+}
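+
+/*
+ * Index mapping implied above: svs2 -> 1, svs -> 2, nominal -> 3,
+ * turbo -> 4; ipa3_disable_clks() votes with the IPA_ICC_NONE row
+ * instead when the clocks are gated.
+ */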
+
+/**
+ * ipa3_enable_clks() - Turn on IPA clocks
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_enable_clks(void)
+{
+	int idx;
+	int i;
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
+		IPAERR("not supported in this mode\n");
+		return;
+	}
+
+	IPADBG("enabling IPA clocks and bus voting\n");
+
+	idx = ipa3_get_bus_vote();
+
+	for (i = 0; i < ipa3_ctx->icc_num_paths; i++) {
+		if (ipa3_ctx->ctrl->icc_path[i] &&
+			icc_set_bw(
+			ipa3_ctx->ctrl->icc_path[i],
+			ipa3_ctx->icc_clk[idx][i][IPA_ICC_AB],
+			ipa3_ctx->icc_clk[idx][i][IPA_ICC_IB]))
+			WARN(1, "path %d bus scaling failed", i);
+	}
+	ipa3_ctx->ctrl->ipa3_enable_clks();
+	atomic_set(&ipa3_ctx->ipa_clk_vote, 1);
+}
+
+
+/**
+ * _ipa_disable_clks_v3_0() - Disable IPA clocks.
+ */
+void _ipa_disable_clks_v3_0(void)
+{
+	ipa3_uc_notify_clk_state(false);
+	if (ipa3_clk) {
+		IPADBG_LOW("disabling gcc_ipa_clk\n");
+		clk_disable_unprepare(ipa3_clk);
+	}
+}
+
+/**
+ * ipa3_disable_clks() - Turn off IPA clocks
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_disable_clks(void)
+{
+	int i;
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
+		IPAERR("not supported in this mode\n");
+		return;
+	}
+
+	IPADBG("disabling IPA clocks and bus voting\n");
+
+	ipa3_ctx->ctrl->ipa3_disable_clks();
+
+	ipa_pm_set_clock_index(0);
+
+	for (i = 0; i < ipa3_ctx->icc_num_paths; i++) {
+		if (ipa3_ctx->ctrl->icc_path[i] &&
+			icc_set_bw(
+			ipa3_ctx->ctrl->icc_path[i],
+			ipa3_ctx->icc_clk[IPA_ICC_NONE][i][IPA_ICC_AB],
+			ipa3_ctx->icc_clk[IPA_ICC_NONE][i][IPA_ICC_IB]))
+			WARN(1, "path %d bus off failed", i);
+	}
+	atomic_set(&ipa3_ctx->ipa_clk_vote, 0);
+}
+
+/**
+ * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating when active client counter
+ * is 1. TAG process ensures that there are no packets inside IPA HW that
+ * were not submitted to the IPA client via the transport. During TAG process
+ * all aggregation frames are (force) closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_start_tag_process(struct work_struct *work)
+{
+	int res;
+
+	IPADBG("starting TAG process\n");
+	/* close aggregation frames on all pipes */
+	res = ipa3_tag_aggr_force_close(-1);
+	if (res)
+		IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+	IPADBG("TAG process done\n");
+}
+
+/**
+ * ipa3_active_clients_log_mod() - Log a modification in the active clients
+ * reference count
+ *
+ * This method logs any modification in the active clients reference count:
+ * - in the circular history buffer
+ * - in the hash table, looking for an entry and creating or deleting
+ *   one as needed
+ *
+ * @id: ipa3_active client logging info struct to hold the log information
+ * @inc: a boolean variable to indicate whether the modification is an increase
+ * or decrease
+ * @int_ctx: a boolean variable to indicate whether this call is being made from
+ * an interrupt context and therefore should allocate GFP_ATOMIC memory
+ *
+ * Method process:
+ * - Hash the unique identifier string
+ * - Find the hash in the table
+ *    1)If found, increase or decrease the reference count
+ *    2)If not found, allocate a new hash table entry struct and initialize it
+ * - Remove and deallocate unneeded data structure
+ * - Log the call in the circular history buffer (unless it is a simple call)
+ */
+#ifdef CONFIG_IPA_DEBUG
+static void ipa3_active_clients_log_mod(
+		struct ipa_active_client_logging_info *id,
+		bool inc, bool int_ctx)
+{
+	char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	struct ipa3_active_client_htable_entry *hentry;
+	struct ipa3_active_client_htable_entry *hfound;
+	u32 hkey;
+	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
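+	/*
+	 * A spinlock is held from here on, so force int_ctx: any
+	 * allocation below must use GFP_ATOMIC.
+	 */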
+	int_ctx = true;
+	hfound = NULL;
+	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+			0);
+	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
+			hentry, list, hkey) {
+		if (!strcmp(hentry->id_string, id->id_string)) {
+			hentry->count = hentry->count + (inc ? 1 : -1);
+			hfound = hentry;
+		}
+	}
+	if (hfound == NULL) {
+		hentry = NULL;
+		hentry = kzalloc(sizeof(
+				struct ipa3_active_client_htable_entry),
+				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+		if (hentry == NULL) {
+			spin_unlock_irqrestore(
+				&ipa3_ctx->ipa3_active_clients_logging.lock,
+				flags);
+			return;
+		}
+		hentry->type = id->type;
+		strlcpy(hentry->id_string, id->id_string,
+				IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+		INIT_HLIST_NODE(&hentry->list);
+		hentry->count = inc ? 1 : -1;
+		hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
+				&hentry->list, hkey);
+	} else if (hfound->count == 0) {
+		hash_del(&hfound->list);
+		kfree(hfound);
+	}
+
+	if (id->type != SIMPLE) {
+		t = local_clock();
+		nanosec_rem = do_div(t, 1000000000) / 1000;
+		snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
+				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+						"[%5lu.%06lu] v %s, %s: %d",
+				(unsigned long)t, nanosec_rem,
+				id->id_string, id->file, id->line);
+		ipa3_active_clients_log_insert(temp_str);
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
+}
+#else
+static void ipa3_active_clients_log_mod(
+		struct ipa_active_client_logging_info *id,
+		bool inc, bool int_ctx)
+{
+}
+#endif
+
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+ * ipa3_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	int ret;
+
+	ipa3_active_clients_log_inc(id, false);
+	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (ret) {
+		IPADBG_LOW("active clients = %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+		return;
+	}
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+
+	/* somebody else might have voted for clocks meanwhile */
+	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (ret) {
+		mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+		IPADBG_LOW("active clients = %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+		return;
+	}
+
+	ipa3_enable_clks();
+	ipa3_suspend_apps_pipes(false);
+	atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+	IPADBG_LOW("active clients = %d\n",
+		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+}
+
+/**
+ * ipa3_active_clks_status() - return the current IPA clock vote status
+ */
+int ipa3_active_clks_status(void)
+{
+	return atomic_read(&ipa3_ctx->ipa_clk_vote);
+}
+
+/**
+ * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous actions should be done. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Return codes: 0 for success
+ *		-EPERM if an asynchronous action should have been done
+ */
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id)
+{
+	int ret;
+
+	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (ret) {
+		ipa3_active_clients_log_inc(id, true);
+		IPADBG_LOW("active clients = %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+		return 0;
+	}
+
+	return -EPERM;
+}
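+
+/*
+ * Minimal usage sketch (hypothetical caller): from atomic context, try
+ * the non-blocking vote first and fall back to a context that may block:
+ *
+ *	if (ipa3_inc_client_enable_clks_no_block(&log_info))
+ *		queue_work(my_wq, &my_blocking_vote_work);
+ *
+ * where my_wq/my_blocking_vote_work are placeholders for the caller's
+ * own deferral mechanism.
+ */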
+
+static void __ipa3_dec_client_disable_clks(void)
+{
+	int ret;
+
+	if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
+		IPAERR("trying to disable clocks with refcnt is 0\n");
+		ipa_assert();
+		return;
+	}
+
+	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+	if (ret)
+		goto bail;
+
+	/* Send the force-close coalescing frame command in LPM mode before
+	 * taking the mutex lock, to avoid otherwise observable races.
+	 */
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
+		!ipa3_ctx->tag_process_before_gating) {
+		ipa3_force_close_coal();
+		/* After sending the force close command, restore
+		 * tag_process_before_gating to its original (false)
+		 * state.
+		 */
+		ipa3_ctx->tag_process_before_gating = false;
+	}
+	/* seems like this is the only client holding the clocks */
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
+	    ipa3_ctx->tag_process_before_gating) {
+		ipa3_ctx->tag_process_before_gating = false;
+		/*
+		 * When TAG process ends, active clients will be
+		 * decreased
+		 */
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+		goto unlock_mutex;
+	}
+
+	/* a different context might increase the clock reference meanwhile */
+	ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
+	if (ret > 0)
+		goto unlock_mutex;
+	ret = ipa3_suspend_apps_pipes(true);
+	if (ret) {
+		/* HW is busy, retry after some time */
+		atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+		queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+			&ipa_dec_clients_disable_clks_on_wq_work,
+			IPA_SUSPEND_BUSY_TIMEOUT);
+	} else {
+		ipa3_disable_clks();
+	}
+
+unlock_mutex:
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+bail:
+	IPADBG_LOW("active clients = %d\n",
+		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+}
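+
+/*
+ * Note on the protocol above: the fast path only drops the count while
+ * it stays above 1 (atomic_add_unless); releasing the last reference
+ * requires the mutex, so suspend/clock-gating cannot race with a
+ * concurrent ipa3_inc_client_enable_clks().
+ */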
+
+/**
+ * ipa3_dec_client_disable_clks() - Decrease active clients counter
+ *
+ * In case there are no other active clients, this function also starts the
+ * TAG process. When the TAG process ends, the IPA clocks will be gated.
+ * The start_tag_process_again flag is set during this function to signal the
+ * TAG process to start again, as another client may still send data to IPA
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	ipa3_active_clients_log_dec(id, false);
+	__ipa3_dec_client_disable_clks();
+}
+
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
+{
+	__ipa3_dec_client_disable_clks();
+}
+
+/**
+ * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
+ * if possible without blocking. If this is the last client then the decrease
+ * will happen from work queue context.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks_no_block(
+	struct ipa_active_client_logging_info *id)
+{
+	int ret;
+
+	ipa3_active_clients_log_dec(id, true);
+	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+	if (ret) {
+		IPADBG_LOW("active clients = %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+		return;
+	}
+
+	/* seems like this is the only client holding the clocks */
+	queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+		&ipa_dec_clients_disable_clks_on_wq_work, 0);
+}
+
+/**
+ * ipa3_inc_acquire_wakelock() - Increase active clients counter, and
+ * acquire wakelock if necessary
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_inc_acquire_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt++;
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
+		__pm_stay_awake(ipa3_ctx->w_lock);
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa3_dec_release_wakelock() - Decrease active clients counter
+ *
+ * In case if the ref count is 0, release the wakelock.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_release_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt--;
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
+		__pm_relax(ipa3_ctx->w_lock);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+int ipa3_set_clock_plan_from_pm(int idx)
+{
+	u32 clk_rate;
+	int i;
+
+	IPADBG_LOW("idx = %d\n", idx);
+
+	if (!ipa3_ctx->enable_clock_scaling) {
+		ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
+		return 0;
+	}
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
+		IPAERR("not supported in this mode\n");
+		return 0;
+	}
+
+	if (idx <= 0 || idx >= 5) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (idx == 1)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
+	else if (idx == 2)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+	else if (idx == 3)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+	else if (idx == 4)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+	else {
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+		IPADBG_LOW("Same voltage\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+	ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
+	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
+		if (ipa3_clk)
+			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		idx = ipa3_get_bus_vote();
+		for (i = 0; i < ipa3_ctx->icc_num_paths; i++) {
+			if (ipa3_ctx->ctrl->icc_path[i] &&
+			    icc_set_bw(
+			    ipa3_ctx->ctrl->icc_path[i],
+			    ipa3_ctx->icc_clk[idx][i][IPA_ICC_AB],
+			    ipa3_ctx->icc_clk[idx][i][IPA_ICC_IB])) {
+				WARN(1, "path %d bus scaling failed",
+					i);
+			}
+		}
+	} else {
+		IPADBG_LOW("clocks are gated, not setting rate\n");
+	}
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+	IPADBG_LOW("Done\n");
+
+	return 0;
+}
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps)
+{
+	enum ipa_voltage_level needed_voltage;
+	u32 clk_rate;
+	int i;
+	int idx;
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
+		IPAERR("not supported in this mode\n");
+		return 0;
+	}
+
+	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u\n",
+					floor_voltage, bandwidth_mbps);
+
+	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+		floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->enable_clock_scaling) {
+		IPADBG_LOW("Clock scaling is enabled\n");
+		if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+			needed_voltage = IPA_VOLTAGE_TURBO;
+		else if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+			needed_voltage = IPA_VOLTAGE_NOMINAL;
+		else if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
+			needed_voltage = IPA_VOLTAGE_SVS;
+		else
+			needed_voltage = IPA_VOLTAGE_SVS2;
+	} else {
+		IPADBG_LOW("Clock scaling is disabled\n");
+		needed_voltage = IPA_VOLTAGE_NOMINAL;
+	}
+
+	needed_voltage = max(needed_voltage, floor_voltage);
+	switch (needed_voltage) {
+	case IPA_VOLTAGE_SVS2:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
+		break;
+	case IPA_VOLTAGE_SVS:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+		break;
+	case IPA_VOLTAGE_NOMINAL:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+		break;
+	case IPA_VOLTAGE_TURBO:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+		break;
+	default:
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+		IPADBG_LOW("Same voltage\n");
+		return 0;
+	}
+
+	/* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
+		if (ipa3_clk)
+			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		idx = ipa3_get_bus_vote();
+		for (i = 0; i < ipa3_ctx->icc_num_paths; i++) {
+			if (ipa3_ctx->ctrl->icc_path[i] &&
+				icc_set_bw(
+				ipa3_ctx->ctrl->icc_path[i],
+				ipa3_ctx->icc_clk[idx][i][IPA_ICC_AB],
+				ipa3_ctx->icc_clk[idx][i][IPA_ICC_IB]))
+				WARN(1, "path %d bus scaling failed", i);
+		}
+	} else {
+		IPADBG_LOW("clocks are gated, not setting rate\n");
+	}
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+	IPADBG_LOW("Done\n");
+
+	return 0;
+}
+
+static void ipa3_process_irq_schedule_rel(void)
+{
+	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+		&ipa3_transport_release_resource_work,
+		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+}
+
+/**
+ * ipa3_suspend_handler() - Handles the suspend interrupt:
+ * wakes up the suspended peripheral by requesting its consumer
+ * @interrupt:		Interrupt type
+ * @private_data:	The client's private data
+ * @interrupt_data:	Interrupt specific information data
+ */
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	u32 suspend_data =
+		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+	u32 bmsk = 1;
+	u32 i = 0;
+	int res;
+	struct ipa_ep_cfg_holb holb_cfg;
+	u32 pipe_bitmask = 0;
+
+	IPADBG("interrupt=%d, interrupt_data=%u\n",
+		interrupt, suspend_data);
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+
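+	/* collect only valid (connected) pipes from the suspend bitmask */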
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1)
+		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
+			pipe_bitmask |= bmsk;
+	res = ipa_pm_handle_suspend(pipe_bitmask);
+	if (res) {
+		IPAERR("ipa_pm_handle_suspend failed %d\n", res);
+		return;
+	}
+}
+
+/**
+ * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+ * as it was registered in the IPA init sequence.
+ * Return codes:
+ * 0: success
+ * -EPERM: failed to remove current handler or failed to add original handler
+ */
+int ipa3_restore_suspend_handler(void)
+{
+	int result = 0;
+
+	result  = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	if (result) {
+		IPAERR("remove handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	IPADBG("suspend handler successfully restored\n");
+
+	return 0;
+}
+
+static int ipa3_apps_cons_release_resource(void)
+{
+	return 0;
+}
+
+static int ipa3_apps_cons_request_resource(void)
+{
+	return 0;
+}
+
+static void ipa3_transport_release_resource(struct work_struct *work)
+{
+	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
+	/* check whether we still need to decrease the client usage count */
+	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
+		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
+			IPADBG("EOT pending Re-scheduling\n");
+			ipa3_process_irq_schedule_rel();
+		} else {
+			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
+			ipa3_dec_release_wakelock();
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
+		}
+	}
+	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
+}
+
+int ipa3_create_apps_resource(void)
+{
+	struct ipa_rm_create_params apps_cons_create_params;
+	struct ipa_rm_perf_profile profile;
+	int result = 0;
+
+	memset(&apps_cons_create_params, 0,
+				sizeof(apps_cons_create_params));
+	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+	apps_cons_create_params.request_resource =
+		ipa3_apps_cons_request_resource;
+	apps_cons_create_params.release_resource =
+		ipa3_apps_cons_release_resource;
+	result = ipa_rm_create_resource(&apps_cons_create_params);
+	if (result) {
+		IPAERR("ipa_rm_create_resource failed\n");
+		return result;
+	}
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+	return result;
+}
+
+/**
+ * ipa3_init_interrupts() - Register to IPA IRQs
+ *
+ * Return codes: 0 in success, negative in failure
+ *
+ */
+int ipa3_init_interrupts(void)
+{
+	int result;
+
+	/* register IPA IRQ handler */
+	result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
+			&ipa3_ctx->master_pdev->dev);
+	if (result) {
+		IPAERR("ipa interrupts initialization failed\n");
+		return -ENODEV;
+	}
+
+	/* add handler for suspend interrupt */
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -ENODEV;
+		goto fail_add_interrupt_handler;
+	}
+
+	return 0;
+
+fail_add_interrupt_handler:
+	ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
+	return result;
+}
+
+/**
+ * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
+ *  The idr structure per filtering table is intended for rule id generation
+ *  per filtering rule.
+ */
+static void ipa3_destroy_flt_tbl_idrs(void)
+{
+	int i;
+	struct ipa3_flt_tbl *flt_tbl;
+
+	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		flt_tbl->rule_ids = NULL;
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		flt_tbl->rule_ids = NULL;
+	}
+}
+
+static void ipa3_freeze_clock_vote_and_notify_modem(void)
+{
+	int res;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		IPADBG("Ignore smp2p on APQ platform\n");
+		return;
+	}
+
+	if (ipa3_ctx->smp2p_info.res_sent)
+		return;
+
+	if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) {
+		IPAERR("fail to get smp2p clk resp bit %ld\n",
+			PTR_ERR(ipa3_ctx->smp2p_info.smem_state));
+		return;
+	}
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
+	res = ipa3_inc_client_enable_clks_no_block(&log_info);
+	if (res)
+		ipa3_ctx->smp2p_info.ipa_clk_on = false;
+	else
+		ipa3_ctx->smp2p_info.ipa_clk_on = true;
+
+	qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state,
+			IPA_SMP2P_SMEM_STATE_MASK,
+			((ipa3_ctx->smp2p_info.ipa_clk_on <<
+			IPA_SMP2P_OUT_CLK_VOTE_IDX) |
+			(1 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX)));
+
+	ipa3_ctx->smp2p_info.res_sent = true;
+	IPADBG("IPA clocks are %s\n",
+		ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
+}
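+
+/*
+ * The smp2p update above packs two output bits: the clock-vote status at
+ * IPA_SMP2P_OUT_CLK_VOTE_IDX and a "response complete" flag at
+ * IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX; ipa3_reset_freeze_vote() below clears
+ * both.
+ */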
+
+void ipa3_reset_freeze_vote(void)
+{
+	if (!ipa3_ctx->smp2p_info.res_sent)
+		return;
+
+	if (ipa3_ctx->smp2p_info.ipa_clk_on)
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
+
+	qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state,
+			IPA_SMP2P_SMEM_STATE_MASK,
+			((0 <<
+			IPA_SMP2P_OUT_CLK_VOTE_IDX) |
+			(0 << IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX)));
+
+	ipa3_ctx->smp2p_info.res_sent = false;
+	ipa3_ctx->smp2p_info.ipa_clk_on = false;
+}
+
+static int ipa3_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	int res;
+
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	IPADBG("Calling uC panic handler\n");
+	res = ipa3_uc_panic_notifier(this, event, ptr);
+	if (res)
+		IPAERR("uC panic handler failed %d\n", res);
+
+	if (atomic_read(&ipa3_ctx->ipa_clk_vote)) {
+		ipahal_print_all_regs(false);
+		ipa_save_registers();
+		ipa_wigig_save_regs();
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_panic_blk = {
+	.notifier_call = ipa3_panic_notifier,
+	/* IPA panic handler needs to run before modem shuts down */
+	.priority = INT_MAX,
+};
+
+static void ipa3_register_panic_hdlr(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+		&ipa3_panic_blk);
+}
+
+static void ipa3_trigger_ipa_ready_cbs(void)
+{
+	struct ipa3_ready_cb_info *info;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	/* Call all the CBs */
+	list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
+		if (info->ready_cb)
+			info->ready_cb(info->user_data);
+
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+static void ipa3_uc_is_loaded(void)
+{
+	IPADBG("\n");
+	complete_all(&ipa3_ctx->uc_loaded_completion_obj);
+}
+
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+	enum gsi_ver gsi_ver;
+
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		gsi_ver = GSI_VER_1_0;
+		break;
+	case IPA_HW_v3_5:
+		gsi_ver = GSI_VER_1_2;
+		break;
+	case IPA_HW_v3_5_1:
+		gsi_ver = GSI_VER_1_3;
+		break;
+	case IPA_HW_v4_0:
+	case IPA_HW_v4_1:
+		gsi_ver = GSI_VER_2_0;
+		break;
+	case IPA_HW_v4_2:
+		gsi_ver = GSI_VER_2_2;
+		break;
+	case IPA_HW_v4_5:
+		gsi_ver = GSI_VER_2_5;
+		break;
+	case IPA_HW_v4_7:
+		gsi_ver = GSI_VER_2_7;
+		break;
+	case IPA_HW_v4_9:
+		gsi_ver = GSI_VER_2_9;
+		break;
+	default:
+		IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+		WARN_ON(1);
+		gsi_ver = GSI_VER_ERR;
+	}
+
+	IPADBG("GSI version %d\n", gsi_ver);
+
+	return gsi_ver;
+}
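+
+/*
+ * Summary of the mapping above: IPA v3.0/v3.1 -> GSI 1.0, v3.5 -> 1.2,
+ * v3.5.1 -> 1.3, v4.0/v4.1 -> 2.0, v4.2 -> 2.2, v4.5 -> 2.5,
+ * v4.7 -> 2.7, v4.9 -> 2.9.
+ */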
+
+static int ipa3_gsi_pre_fw_load_init(void)
+{
+	int result;
+
+	result = gsi_configure_regs(
+		ipa3_res.ipa_mem_base,
+		ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
+
+	if (result) {
+		IPAERR("Failed to configure GSI registers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipa3_alloc_gsi_channel(void)
+{
+	const struct ipa_gsi_ep_config *gsi_ep_cfg;
+	enum ipa_client_type type;
+	int code = 0;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_cfg = ipa3_get_gsi_ep_info(type);
+		IPADBG("for ep %d client is %d\n", i, type);
+		if (!gsi_ep_cfg)
+			continue;
+
+		ret = gsi_alloc_channel_ee(gsi_ep_cfg->ipa_gsi_chan_num,
+					gsi_ep_cfg->ee, &code);
+		if (ret == GSI_STATUS_SUCCESS) {
+			IPADBG("alloc gsi ch %d ee %d with code %d\n",
+					gsi_ep_cfg->ipa_gsi_chan_num,
+					gsi_ep_cfg->ee,
+					code);
+		} else {
+			IPAERR("failed to alloc ch %d ee %d code %d\n",
+					gsi_ep_cfg->ipa_gsi_chan_num,
+					gsi_ep_cfg->ee,
+					code);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static inline void ipa3_enable_napi_lan_rx(void)
+{
+	if (ipa3_ctx->lan_rx_napi_enable)
+		napi_enable(&ipa3_ctx->napi_lan_rx);
+}
+
+/**
+ * ipa3_post_init() - Initialize the IPA Driver (Part II).
+ * This part contains all initialization which requires interaction with
+ * IPA HW (via GSI).
+ *
+ * @resource_p:	contains platform-specific values from the DTS file
+ * @ipa_dev:	the device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Initialize endpoints bitmaps
+ * - Initialize resource groups min and max values
+ * - Initialize filtering lists heads and idr
+ * - Initialize interrupts
+ * - Register GSI
+ * - Setup APPS pipes
+ * - Initialize tethering bridge
+ * - Initialize IPA debugfs
+ * - Initialize IPA uC interface
+ * - Initialize WDI interface
+ * - Initialize USB interface
+ * - Register for panic handler
+ * - Trigger IPA ready callbacks (to all subscribers)
+ * - Trigger IPA completion object (to all who wait on it)
+ */
+static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
+			  struct device *ipa_dev)
+{
+	int result;
+	struct gsi_per_props gsi_props;
+	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+	struct ipa3_flt_tbl *flt_tbl;
+	int i;
+	struct idr *idr;
+
+	if (ipa3_ctx == NULL) {
+		IPADBG("IPA driver hasn't been initialized\n");
+		return -ENXIO;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_ctx->ipa_initialization_complete)
+		return 0;
+
+	IPADBG("active clients = %d\n",
+			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+	/* move proxy vote for modem on ipa3_post_init */
+	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
+		ipa3_proxy_clk_vote();
+
+	/* The following will retrieve and save the gsi fw version */
+	ipa_save_gsi_ver();
+
+	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+		ipa3_ctx->pdev)) {
+		IPAERR("fail to init ipahal\n");
+		result = -EFAULT;
+		goto fail_ipahal;
+	}
+
+	result = ipa3_init_hw();
+	if (result) {
+		IPAERR(":error initializing HW\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	IPADBG("IPA HW initialization sequence completed\n");
+
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	IPADBG("IPA Pipes num %u\n", ipa3_ctx->ipa_num_pipes);
+	if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
+		IPAERR("IPA has more pipes than supported: has %d, max %d\n",
+			ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+
+	ipa3_ctx->ctrl->ipa_sram_read_settings();
+	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
+
+	IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
+		ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
+		ipa3_ctx->ip4_rt_tbl_nhash_lcl);
+
+	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
+		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
+
+	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
+		ipa3_ctx->ip4_flt_tbl_hash_lcl,
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl);
+
+	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
+		ipa3_ctx->ip6_flt_tbl_hash_lcl,
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl);
+
+	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
+		IPAERR("SW expects more core memory: needed %d, avail %d\n",
+			ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	result = ipa3_allocate_dma_task_for_gsi();
+	if (result) {
+		IPAERR("failed to allocate dma task\n");
+		goto fail_dma_task;
+	}
+
+	result = ipa3_allocate_coal_close_frame();
+	if (result) {
+		IPAERR("failed to allocate coal frame cmd\n");
+		goto fail_coal_frame;
+	}
+
+	if (ipa3_nat_ipv6ct_init_devices()) {
+		IPAERR("unable to init NAT and IPv6CT devices\n");
+		result = -ENODEV;
+		goto fail_nat_ipv6ct_init_dev;
+	}
+
+	result = ipa3_alloc_pkt_init();
+	if (result) {
+		IPAERR("Failed to alloc pkt_init payload\n");
+		result = -ENODEV;
+		goto fail_allok_pkt_init;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
+		ipa3_enable_dcd();
+
+	/*
+	 * The indication of whether we are working in an MHI or non-MHI
+	 * config is given in ipa3_write, which runs before ipa3_post_init;
+	 * i.e. from this point on it is safe to use the ipa3_ep_mapping
+	 * array, and the correct entry will be returned from
+	 * ipa3_get_hw_type_index()
+	 */
+	ipa_init_ep_flt_bitmap();
+	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+	/* Assign resource limitation to each group */
+	ipa3_set_resorce_groups_min_max_limits();
+
+	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+	idr_init(idr);
+	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+	idr_init(idr);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
+	}
+
+	result = ipa3_init_interrupts();
+	if (result) {
+		IPAERR("ipa initialization of interrupts failed\n");
+		result = -ENODEV;
+		goto fail_init_interrupts;
+	}
+
+	/*
+	 * Disable prefetch for USB or MHI on IPA v3.5/v3.5.1.
+	 * This is needed to allow MBIM to work.
+	 */
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
+		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
+		(!ipa3_ctx->ipa_config_is_mhi))
+		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
+
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
+		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
+		(ipa3_ctx->ipa_config_is_mhi))
+		ipa3_disable_prefetch(IPA_CLIENT_MHI_CONS);
+
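+	/*
+	 * Populate the GSI registration properties. On the emulation
+	 * platform the interrupt controller lives in a separately
+	 * mapped region, which is described here as well.
+	 */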
+	memset(&gsi_props, 0, sizeof(gsi_props));
+	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
+	gsi_props.ee = resource_p->ee;
+	gsi_props.intr = GSI_INTR_IRQ;
+	gsi_props.phys_addr = resource_p->transport_mem_base;
+	gsi_props.size = resource_p->transport_mem_size;
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		gsi_props.irq = resource_p->emulator_irq;
+		gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
+		gsi_props.emulator_intcntrlr_addr =
+		    resource_p->emulator_intcntrlr_mem_base;
+		gsi_props.emulator_intcntrlr_size =
+		    resource_p->emulator_intcntrlr_mem_size;
+	} else {
+		gsi_props.irq = resource_p->transport_irq;
+	}
+	gsi_props.notify_cb = ipa_gsi_notify_cb;
+	gsi_props.req_clk_cb = NULL;
+	gsi_props.rel_clk_cb = NULL;
+	gsi_props.clk_status_cb = ipa3_active_clks_status;
+
+	if (ipa3_ctx->ipa_config_is_mhi) {
+		gsi_props.mhi_er_id_limits_valid = true;
+		gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
+		gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
+	}
+	gsi_props.skip_ieob_mask_wa = resource_p->skip_ieob_mask_wa;
+
+	result = gsi_register_device(&gsi_props,
+		&ipa3_ctx->gsi_dev_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR(":gsi register error - %d\n", result);
+		result = -ENODEV;
+		goto fail_register_device;
+	}
+	IPADBG("IPA gsi is registered\n");
+	/*
+	 * GSI 2.2 requires all EE GSI channels to be allocated
+	 * during device boot.
+	 */
+	if (ipa3_get_gsi_ver(resource_p->ipa_hw_type) == GSI_VER_2_2) {
+		result = ipa3_alloc_gsi_channel();
+		if (result) {
+			IPAERR("Failed to alloc the GSI channels\n");
+			result = -ENODEV;
+			goto fail_alloc_gsi_channel;
+		}
+	}
+
+	/* setup the AP-IPA pipes */
+	if (ipa3_setup_apps_pipes()) {
+		IPAERR(":failed to setup IPA-Apps pipes\n");
+		result = -ENODEV;
+		goto fail_setup_apps_pipes;
+	}
+	IPADBG("IPA GPI pipes were connected\n");
+
+	if (ipa3_ctx->use_ipa_teth_bridge) {
+		/* Initialize the tethering bridge driver */
+		result = ipa3_teth_bridge_driver_init();
+		if (result) {
+			IPAERR(":teth_bridge init failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+		IPADBG("teth_bridge initialized");
+	}
+
+	result = ipa3_uc_interface_init();
+	if (result)
+		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
+	else
+		IPADBG(":ipa Uc interface init ok\n");
+
+	uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
+
+	result = ipa3_wdi_init();
+	if (result)
+		IPAERR(":wdi init failed (%d)\n", -result);
+	else
+		IPADBG(":wdi init ok\n");
+
+	result = ipa3_wigig_init_i();
+	if (result)
+		IPAERR(":wigig init failed (%d)\n", -result);
+	else
+		IPADBG(":wigig init ok\n");
+
+	result = ipa3_ntn_init();
+	if (result)
+		IPAERR(":ntn init failed (%d)\n", -result);
+	else
+		IPADBG(":ntn init ok\n");
+
+	result = ipa_hw_stats_init();
+	if (result)
+		IPAERR("fail to init stats %d\n", result);
+	else
+		IPADBG(":stats init ok\n");
+
+	ipa3_register_panic_hdlr();
+
+	ipa3_debugfs_init();
+
+	mutex_lock(&ipa3_ctx->lock);
+	ipa3_ctx->ipa_initialization_complete = true;
+	mutex_unlock(&ipa3_ctx->lock);
+	ipa3_enable_napi_lan_rx();
+	ipa3_trigger_ipa_ready_cbs();
+	complete_all(&ipa3_ctx->init_completion_obj);
+
+	ipa_ut_module_init();
+
+	pr_info("IPA driver initialization was successful.\n");
+
+	return 0;
+
+fail_teth_bridge_driver_init:
+	ipa3_teardown_apps_pipes();
+fail_alloc_gsi_channel:
+fail_setup_apps_pipes:
+	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
+fail_register_device:
+	ipa3_destroy_flt_tbl_idrs();
+fail_init_interrupts:
+	ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	ipa3_interrupts_destroy(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
+fail_alloc_pkt_init:
+	ipa3_nat_ipv6ct_destroy_devices();
+fail_nat_ipv6ct_init_dev:
+	ipa3_free_coal_close_frame();
+fail_coal_frame:
+	ipa3_free_dma_task_for_gsi();
+fail_dma_task:
+fail_init_hw:
+	ipahal_destroy();
+fail_ipahal:
+	ipa3_proxy_clk_unvote();
+
+	return result;
+}
+
+static int ipa3_manual_load_ipa_fws(void)
+{
+	int result;
+	const struct firmware *fw;
+	const char *path = IPA_FWS_PATH;
+
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		switch (ipa3_get_emulation_type()) {
+		case IPA_HW_v3_5_1:
+			path = IPA_FWS_PATH_3_5_1;
+			break;
+		case IPA_HW_v4_0:
+			path = IPA_FWS_PATH_4_0;
+			break;
+		case IPA_HW_v4_5:
+			path = IPA_FWS_PATH_4_5;
+			break;
+		default:
+			break;
+		}
+	}
+
+	IPADBG("Manual FW loading (%s) process initiated\n", path);
+
+	result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
+	if (result < 0) {
+		IPAERR("request_firmware failed, error %d\n", result);
+		return result;
+	}
+
+	IPADBG("FWs are available for loading\n");
+
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		result = emulator_load_fws(fw,
+			ipa3_res.transport_mem_base,
+			ipa3_res.transport_mem_size,
+			ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
+	} else {
+		result = ipa3_load_fws(fw, ipa3_res.transport_mem_base,
+			ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
+	}
+
+	if (result) {
+		IPAERR("Manual IPA FWs loading has failed\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	result = gsi_enable_fw(ipa3_res.transport_mem_base,
+				ipa3_res.transport_mem_size,
+				ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
+	if (result) {
+		IPAERR("Failed to enable GSI FW\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	release_firmware(fw);
+
+	IPADBG("Manual FW loading process is complete\n");
+
+	return 0;
+}
+
+static int ipa3_pil_load_ipa_fws(const char *sub_sys)
+{
+	void *subsystem_get_retval = NULL;
+
+	IPADBG("PIL FW loading process initiated sub_sys=%s\n",
+		sub_sys);
+
+	subsystem_get_retval = subsystem_get(sub_sys);
+	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
+		IPAERR("Unable to PIL load FW for sub_sys=%s\n", sub_sys);
+		return -EINVAL;
+	}
+
+	IPADBG("PIL FW loading process is complete sub_sys=%s\n", sub_sys);
+	return 0;
+}
+
+static void ipa3_load_ipa_fw(struct work_struct *work)
+{
+	int result;
+
+	IPADBG("Entry\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_attach_to_smmu();
+	if (result) {
+		IPAERR("IPA attach to smmu failed %d\n", result);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
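+	/*
+	 * On non-emulation targets, FW is loaded via PIL, except on MDM
+	 * targets older than IPA v3.5; those, and emulation targets,
+	 * load the FW manually.
+	 */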
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
+	    ((ipa3_ctx->platform_type != IPA_PLAT_TYPE_MDM) ||
+	    (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
+		result = ipa3_pil_load_ipa_fws(IPA_SUBSYSTEM_NAME);
+	else
+		result = ipa3_manual_load_ipa_fws();
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	if (result) {
+		IPAERR("IPA FW loading process has failed result=%d\n",
+			result);
+		return;
+	}
+	mutex_lock(&ipa3_ctx->fw_load_data.lock);
+	ipa3_ctx->fw_load_data.state = IPA_FW_LOAD_STATE_LOADED;
+	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
+	pr_info("IPA FW loaded successfully\n");
+
+	result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
+	if (result) {
+		IPAERR("IPA post init failed %d\n", result);
+		return;
+	}
+
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+		ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
+		ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+
+		IPADBG("Loading IPA uC via PIL\n");
+
+		/* Unvoting will happen when the uC-loaded event is received. */
+		ipa3_proxy_clk_vote();
+
+		result = ipa3_pil_load_ipa_fws(IPA_UC_SUBSYSTEM_NAME);
+		if (result) {
+			IPAERR("IPA uC loading process has failed result=%d\n",
+				result);
+			return;
+		}
+		IPADBG("IPA uC PIL loading succeeded\n");
+	}
+}
+
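+/*
+ * FW loading is gated on two independent events: the FW file becoming
+ * ready (via ipa3_write) and SMMU attachment completing. Whichever
+ * event arrives second moves the state machine to LOAD_READY and
+ * schedules the FW loading work.
+ */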
+static void ipa_fw_load_sm_handle_event(enum ipa_fw_load_event ev)
+{
+	mutex_lock(&ipa3_ctx->fw_load_data.lock);
+
+	IPADBG("state=%d event=%d\n", ipa3_ctx->fw_load_data.state, ev);
+
+	if (ev == IPA_FW_LOAD_EVNT_FWFILE_READY) {
+		if (ipa3_ctx->fw_load_data.state == IPA_FW_LOAD_STATE_INIT) {
+			ipa3_ctx->fw_load_data.state =
+				IPA_FW_LOAD_STATE_FWFILE_READY;
+			goto out;
+		}
+		if (ipa3_ctx->fw_load_data.state ==
+			IPA_FW_LOAD_STATE_SMMU_DONE) {
+			ipa3_ctx->fw_load_data.state =
+				IPA_FW_LOAD_STATE_LOAD_READY;
+			goto sched_fw_load;
+		}
+		IPAERR("ignore multiple requests to load FW\n");
+		goto out;
+	}
+	if (ev == IPA_FW_LOAD_EVNT_SMMU_DONE) {
+		if (ipa3_ctx->fw_load_data.state == IPA_FW_LOAD_STATE_INIT) {
+			ipa3_ctx->fw_load_data.state =
+				IPA_FW_LOAD_STATE_SMMU_DONE;
+			goto out;
+		}
+		if (ipa3_ctx->fw_load_data.state ==
+			IPA_FW_LOAD_STATE_FWFILE_READY) {
+			ipa3_ctx->fw_load_data.state =
+				IPA_FW_LOAD_STATE_LOAD_READY;
+			goto sched_fw_load;
+		}
+		IPAERR("ignore multiple smmu done events\n");
+		goto out;
+	}
+	IPAERR("invalid event ev=%d\n", ev);
+	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
+	ipa_assert();
+	return;
+
+out:
+	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
+	return;
+
+sched_fw_load:
+	IPADBG("Scheduled a work to load IPA FW\n");
+	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
+	queue_work(ipa3_ctx->transport_power_mgmt_wq,
+		&ipa3_fw_loading_work);
+}
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+
+	char dbg_buff[32] = { 0 };
+	int i = 0;
+
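+	/* reject writes that leave no room for a terminating NUL */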
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+
+	if (missing) {
+		IPAERR("Unable to copy data from user\n");
+		return -EFAULT;
+	}
+
+	if (count > 0)
+		dbg_buff[count] = '\0';
+
+	IPADBG("user input string %s\n", dbg_buff);
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_is_ready())
+		return count;
+
+	/* Ignore an empty ipa_config file */
+	for (i = 0; i < count; ++i) {
+		if (!isspace(dbg_buff[i]))
+			break;
+	}
+
+	if (i == count) {
+		IPADBG("Empty ipa_config file\n");
+		return count;
+	}
+
+	/* Check MHI configuration on MDM devices */
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) {
+
+		if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) {
+			if (strnstr(dbg_buff, "eth", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] =
+				true;
+			if (strnstr(dbg_buff, "rndis", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] =
+				true;
+			if (strnstr(dbg_buff, "ecm", strlen(dbg_buff)))
+				ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] =
+				true;
+
+			/*
+			 * When VLAN mode is passed to our device, we
+			 * expect another write to follow.
+			 */
+			return count;
+		}
+
+		/* trim the trailing newline character, if any */
+		if (count && (dbg_buff[count - 1] == '\n'))
+			dbg_buff[count - 1] = '\0';
+
+		/*
+		 * This logic enforces MHI mode based on userspace input.
+		 * Note that MHI mode may already have been determined by
+		 * the earlier logic.
+		 */
+		if (!strcasecmp(dbg_buff, "MHI")) {
+			ipa3_ctx->ipa_config_is_mhi = true;
+		} else if (strcmp(dbg_buff, "1")) {
+			IPAERR("got invalid string %s not loading FW\n",
+				dbg_buff);
+			return count;
+		}
+		pr_info("IPA is loading with %sMHI configuration\n",
+			ipa3_ctx->ipa_config_is_mhi ? "" : "non ");
+	}
+
+	ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_FWFILE_READY);
+
+	return count;
+}
+
+/**
+ * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible
+ *	from AP.
+ * @reg_info: Pointer to array of memory regions to unlock
+ * @num_regs: Number of elements in the array
+ *
+ * Converts the input array of regions to a struct that TZ understands and
+ * issues an SCM call.
+ * Also flushes the memory cache to DDR in order to make sure that TZ sees the
+ * correct data structure.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
+{
+	int i, ret;
+	compat_size_t size;
+	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
+	struct tz_smmu_ipa_protect_region_s cmd_buf;
+
+	if (reg_info ==  NULL || num_regs == 0) {
+		IPAERR("Bad parameters\n");
+		return -EFAULT;
+	}
+
+	size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
+	ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
+	if (ipa_tz_unlock_vec == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < num_regs; i++) {
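+		/*
+		 * addr ^ (addr & 0xFFF) clears the low 12 bits, i.e.
+		 * aligns the address down to a 4 KB page boundary.
+		 */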
+		ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^
+			(reg_info[i].reg_addr & 0xFFF);
+		ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^
+			(reg_info[i].reg_addr & 0xFFF);
+		ipa_tz_unlock_vec[i].size = reg_info[i].size;
+		ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
+	}
+
+	/* pass physical address of command buffer */
+	cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
+	cmd_buf.size_bytes = size;
+
+	ret = qcom_scm_mem_protect_region_id(
+			virt_to_phys((void *)ipa_tz_unlock_vec),
+			size);
+
+	if (ret) {
+		IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
+		kfree(ipa_tz_unlock_vec);
+		return -EFAULT;
+	}
+	kfree(ipa_tz_unlock_vec);
+	return 0;
+}
+
+static int ipa3_alloc_pkt_init(void)
+{
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
+	int i;
+
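+	/*
+	 * Construct a throwaway command first, only to learn the
+	 * immediate-command opcode and payload length; the real
+	 * per-pipe payloads are built in the loop below.
+	 */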
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+		&cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct IMM cmd\n");
+		return -ENOMEM;
+	}
+	ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
+
+	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		return -ENOMEM;
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+	memset(mem.base, 0, mem.size);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		cmd.destination_pipe_index = i;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+			&cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct IMM cmd\n");
+			dma_free_coherent(ipa3_ctx->pdev,
+				mem.size,
+				mem.base,
+				mem.phys_base);
+			return -ENOMEM;
+		}
+		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
+			cmd_pyld->len);
+		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
+		ipahal_destroy_imm_cmd(cmd_pyld);
+	}
+
+	return 0;
+}
+
+/*
+ * SCM call to check if secure dump is allowed.
+ *
+ * Returns true when secure dump is allowed, false otherwise.
+ */
+static bool ipa_is_mem_dump_allowed(void)
+{
+	int ret;
+	u32 dump_state;
+
+	ret = qcom_scm_get_sec_dump_state(&dump_state);
+
+	if (ret) {
+		IPAERR("SCM DUMP_STATE call failed\n");
+		return false;
+	}
+
+	return (dump_state == 1);
+}
+
+static int ipa3_lan_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa3_lan_rx_poll(ipa3_ctx->clnt_hdl_data_in,
+							NAPI_WEIGHT);
+	return rcvd_pkts;
+}
+
+static inline void ipa3_enable_napi_netdev(void)
+{
+	if (ipa3_ctx->lan_rx_napi_enable) {
+		init_dummy_netdev(&ipa3_ctx->lan_ndev);
+		netif_napi_add(&ipa3_ctx->lan_ndev, &ipa3_ctx->napi_lan_rx,
+				ipa3_lan_poll, NAPI_WEIGHT);
+	}
+}
+
+/**
+ * ipa3_pre_init() - Initialize the IPA driver.
+ * This part contains all initialization which doesn't require IPA HW, such
+ * as structure allocations and initializations, register writes, etc.
+ *
+ * @resource_p:	contains platform-specific values from the DTS file
+ * @ipa_pdev:	the platform device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * Allocate memory for the driver context data struct
+ * Initialize ipa3_ctx with:
+ *    1) parsed values from the DTS file
+ *    2) parameters passed to the module initialization
+ *    3) values read from HW (such as core memory size)
+ * Map IPA core registers to CPU memory
+ * Restart the IPA core (HW reset)
+ * Initialize the look-aside caches (kmem_cache/slab) for filter,
+ *   routing and IPA-tree entries
+ * Create a memory pool with 4 objects for DMA operations (each object
+ *   is 512 bytes long); these objects are used for tx (A5->IPA)
+ * Initialize list heads (routing, hdr, system pipes)
+ * Initialize mutexes (for ipa_ctx and NAT memory)
+ * Initialize spinlocks (for lists related to A5<->IPA pipes)
+ * Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+ * Initialize red-black trees for the handles of headers, routing rules,
+ *   routing tables and filtering rules
+ * Initialize the filter block by committing IPv4 and IPv6 default rules
+ * Create an empty routing table in system memory (no committing)
+ * Create a char-device for IPA
+ * Initialize IPA PM (power manager)
+ * Configure GSI registers (in the GSI case)
+ */
+static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
+		struct platform_device *ipa_pdev)
+{
+	int result = 0;
+	int i, j;
+	struct ipa3_rt_tbl_set *rset;
+	struct ipa_active_client_logging_info log_info;
+	struct cdev *cdev;
+
+	IPADBG("IPA Driver initialization started\n");
+
+	ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
+	if (!ipa3_ctx) {
+		result = -ENOMEM;
+		goto fail_mem_ctx;
+	}
+
+	ipa3_ctx->fw_load_data.state = IPA_FW_LOAD_STATE_INIT;
+	mutex_init(&ipa3_ctx->fw_load_data.lock);
+
+	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+	if (ipa3_ctx->logbuf == NULL)
+		IPADBG("failed to create IPC log, continue...\n");
+
+	/* ipa3_ctx->pdev and ipa3_ctx->uc_pdev will be set in the smmu probes */
+	ipa3_ctx->master_pdev = ipa_pdev;
+	for (i = 0; i < IPA_SMMU_CB_MAX; i++)
+		ipa3_ctx->s1_bypass_arr[i] = true;
+
+	/* initialize the gsi protocol info for uC debug stats */
+	for (i = 0; i < IPA_HW_PROTOCOL_MAX; i++) {
+		ipa3_ctx->gsi_info[i].protocol = i;
+		/* initialize all to be not started */
+		for (j = 0; j < IPA_MAX_CH_STATS_SUPPORTED; j++)
+			ipa3_ctx->gsi_info[i].ch_id_info[j].ch_id =
+				0xFF;
+	}
+
+	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+	ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
+	ipa3_ctx->platform_type = resource_p->platform_type;
+	ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+	ipa3_ctx->ipa_wdi2_over_gsi = resource_p->ipa_wdi2_over_gsi;
+	ipa3_ctx->ipa_wdi3_over_gsi = resource_p->ipa_wdi3_over_gsi;
+	ipa3_ctx->ipa_fltrt_not_hashable = resource_p->ipa_fltrt_not_hashable;
+	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
+	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+	ipa3_ctx->ipa_wan_skb_page = resource_p->ipa_wan_skb_page;
+	ipa3_ctx->stats.page_recycle_stats[0].total_replenished = 0;
+	ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc = 0;
+	ipa3_ctx->stats.page_recycle_stats[1].total_replenished = 0;
+	ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc = 0;
+	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+	ipa3_ctx->ee = resource_p->ee;
+	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
+	ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
+	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
+	ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
+	ipa3_ctx->entire_ipa_block_size = resource_p->entire_ipa_block_size;
+	ipa3_ctx->do_register_collection_on_crash =
+	    resource_p->do_register_collection_on_crash;
+	ipa3_ctx->do_testbus_collection_on_crash =
+	    resource_p->do_testbus_collection_on_crash;
+	ipa3_ctx->do_non_tn_collection_on_crash =
+	    resource_p->do_non_tn_collection_on_crash;
+	ipa3_ctx->secure_debug_check_action =
+		resource_p->secure_debug_check_action;
+	ipa3_ctx->do_ram_collection_on_crash =
+		resource_p->do_ram_collection_on_crash;
+	ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
+
+	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
+		if (ipa_is_mem_dump_allowed())
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			ipa3_ctx->sd_state = SD_DISABLED;
+	} else {
+		if (ipa3_ctx->secure_debug_check_action == OVERRIDE_SCM_TRUE)
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			/* secure_debug_check_action == OVERRIDE_SCM_FALSE */
+			ipa3_ctx->sd_state = SD_DISABLED;
+	}
+
+	if (ipa3_ctx->sd_state == SD_ENABLED) {
+		/* secure debug is enabled. */
+		IPADBG("secure debug enabled\n");
+	} else {
+		/* secure debug is disabled. */
+		IPADBG("secure debug disabled\n");
+		ipa3_ctx->do_testbus_collection_on_crash = false;
+	}
+	ipa3_ctx->ipa_endp_delay_wa = resource_p->ipa_endp_delay_wa;
+
+	WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
+		"Non NORMAL IPA HW mode, is this emulation platform ?");
+
+	if (resource_p->ipa_tz_unlock_reg) {
+		ipa3_ctx->ipa_tz_unlock_reg_num =
+			resource_p->ipa_tz_unlock_reg_num;
+		ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
+			ipa3_ctx->ipa_tz_unlock_reg_num,
+			sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
+			result = -ENOMEM;
+			goto fail_tz_unlock_reg;
+		}
+		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+			ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
+				resource_p->ipa_tz_unlock_reg[i].reg_addr;
+			ipa3_ctx->ipa_tz_unlock_reg[i].size =
+				resource_p->ipa_tz_unlock_reg[i].size;
+		}
+
+		/* unlock registers for uc */
+		result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg,
+					    ipa3_ctx->ipa_tz_unlock_reg_num);
+		if (result)
+			IPAERR("Failed to unlock memory region using TZ\n");
+	}
+
+	/* default aggregation parameters */
+	ipa3_ctx->aggregation_type = IPA_MBIM_16;
+	ipa3_ctx->aggregation_byte_limit = 1;
+	ipa3_ctx->aggregation_time_limit = 0;
+
+	/* configure interconnect parameters */
+	ipa3_ctx->icc_num_cases = resource_p->icc_num_cases;
+	ipa3_ctx->icc_num_paths = resource_p->icc_num_paths;
+	for (i = 0; i < ipa3_ctx->icc_num_cases; i++) {
+		for (j = 0; j < ipa3_ctx->icc_num_paths; j++) {
+			ipa3_ctx->icc_clk[i][j][IPA_ICC_AB] =
+			    resource_p->icc_clk_val[i][j*IPA_ICC_TYPE_MAX];
+			ipa3_ctx->icc_clk[i][j][IPA_ICC_IB] =
+			    resource_p->icc_clk_val[i][j*IPA_ICC_TYPE_MAX+1];
+		}
+	}
+
+	ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
+	if (!ipa3_ctx->ctrl) {
+		result = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+	result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
+			ipa3_ctx->ipa_hw_type);
+	if (result) {
+		IPAERR("fail to static bind IPA ctrl\n");
+		result = -EFAULT;
+		goto fail_bind;
+	}
+
+	result = ipa3_init_mem_partition(ipa3_ctx->ipa_hw_type);
+	if (result) {
+		IPAERR(":ipa3_init_mem_partition failed\n");
+		result = -ENODEV;
+		goto fail_init_mem_partition;
+	}
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
+	    ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		/* get BUS handle */
+		for (i = 0; i < ipa3_ctx->icc_num_paths; i++) {
+			ipa3_ctx->ctrl->icc_path[i] = of_icc_get(
+				&ipa3_ctx->master_pdev->dev,
+				resource_p->icc_path_name[i]);
+			if (IS_ERR(ipa3_ctx->ctrl->icc_path[i])) {
+				IPAERR("fail to register with bus mgr!\n");
+				result = PTR_ERR(ipa3_ctx->ctrl->icc_path[i]);
+				if (result != -EPROBE_DEFER) {
+					IPAERR("Failed to get path %s\n",
+						ipa3_ctx->master_pdev->name);
+				}
+				goto fail_bus_reg;
+			}
+		}
+	}
+
+	/* get IPA clocks */
+	result = ipa3_get_clks(&ipa3_ctx->master_pdev->dev);
+	if (result)
+		goto fail_bus_reg;
+
+	/* init active_clients_log after getting ipa-clk */
+	result = ipa3_active_clients_log_init();
+	if (result)
+		goto fail_init_active_client;
+
+	/* Enable ipa3_ctx->enable_clock_scaling */
+	ipa3_ctx->enable_clock_scaling = 1;
+	/* vote for svs2 on bootup */
+	ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
+
+	/* Enable ipa3_ctx->enable_napi_chain */
+	ipa3_ctx->enable_napi_chain = 1;
+
+	/* assume clock is on in virtual/emulation mode */
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+		atomic_set(&ipa3_ctx->ipa_clk_vote, 1);
+
+	/* enable IPA clocks explicitly to allow the initialization */
+	ipa3_enable_clks();
+
+	/* setup IPA register access */
+	IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst);
+	ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst,
+			resource_p->ipa_mem_size);
+	if (!ipa3_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+
+	IPADBG(
+	    "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
+	    resource_p->ipa_mem_base,
+	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
+	    ipa3_ctx->mmio,
+	    resource_p->ipa_mem_size);
+
+	/*
+	 * Setup access for register collection/dump on crash
+	 */
+	if (ipa_reg_save_init(IPA_MEM_INIT_VAL) != 0) {
+		result = -EFAULT;
+		goto fail_gsi_map;
+	}
+
+	/*
+	 * Since we now know where the transport's registers live,
+	 * let's set up access to them.  This is done here because
+	 * subsequent functions that deal with the transport require
+	 * this access.
+	 */
+	if (gsi_map_base(
+		ipa3_res.transport_mem_base,
+		ipa3_res.transport_mem_size) != 0) {
+		IPAERR("Allocation of gsi base failed\n");
+		result = -EFAULT;
+		goto fail_gsi_map;
+	}
+
+	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+	ipa3_active_clients_log_inc(&log_info, false);
+	ipa3_ctx->q6_proxy_clk_vote_valid = true;
+	ipa3_ctx->q6_proxy_clk_vote_cnt = 1;
+
+	/* Update the proxy vote count to 1 */
+	atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
+
+	/* Create workqueues for power management */
+	ipa3_ctx->power_mgmt_wq =
+		create_singlethread_workqueue("ipa_power_mgmt");
+	if (!ipa3_ctx->power_mgmt_wq) {
+		IPAERR("failed to create power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	ipa3_ctx->transport_power_mgmt_wq =
+		create_singlethread_workqueue("transport_power_mgmt");
+	if (!ipa3_ctx->transport_power_mgmt_wq) {
+		IPAERR("failed to create transport power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_create_transport_wq;
+	}
+
+	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
+
+	/* init the lookaside cache */
+	ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+			sizeof(struct ipa3_flt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+			sizeof(struct ipa3_rt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+			sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa3_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA_HDR_OFFSET",
+			   sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+		sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_cache) {
+		IPAERR(":ipa hdr proc ctx cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_offset_cache =
+		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+		sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
+		IPAERR(":ipa hdr proc ctx off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_offset_cache;
+	}
+	ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+			sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
+	if (!ipa3_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa3_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_TX_PKT_WRAPPER",
+			   sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa3_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_RX_PKT_WRAPPER",
+			   sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		INIT_LIST_HEAD(
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	idr_init(&rset->rule_ids);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	idr_init(&rset->rule_ids);
+	idr_init(&ipa3_ctx->flt_rt_counters.hdl);
+	spin_lock_init(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	memset(&ipa3_ctx->flt_rt_counters.used_hw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_hw));
+	memset(&ipa3_ctx->flt_rt_counters.used_sw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_sw));
+
+	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
+	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
+	INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
+	init_waitqueue_head(&ipa3_ctx->msg_waitq);
+	mutex_init(&ipa3_ctx->msg_lock);
+
+	/* store wlan client-connect-msg-list */
+	INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list);
+	mutex_init(&ipa3_ctx->msg_wlan_client_lock);
+
+	mutex_init(&ipa3_ctx->lock);
+	mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
+	mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
+
+	idr_init(&ipa3_ctx->ipa_idr);
+	spin_lock_init(&ipa3_ctx->idr_lock);
+
+	/* wlan related member */
+	memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
+	spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
+	spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+
+	ipa3_ctx->cdev.class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa3_ctx->cdev.dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa3_ctx->cdev.dev = device_create(ipa3_ctx->cdev.class, NULL,
+		 ipa3_ctx->cdev.dev_num, ipa3_ctx, DRV_NAME);
+	if (IS_ERR(ipa3_ctx->cdev.dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	/* Register a wakeup source. */
+	ipa3_ctx->w_lock =
+		wakeup_source_register(&ipa_pdev->dev, "IPA_WS");
+	if (!ipa3_ctx->w_lock) {
+		IPAERR("IPA wakeup source register failed\n");
+		result = -ENOMEM;
+		goto fail_w_source_register;
+	}
+	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
+
+	/* Initialize Power Management framework */
+	result = ipa_pm_init(&ipa3_res.pm_init);
+	if (result) {
+		IPAERR("IPA PM initialization failed (%d)\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_pm_init;
+	}
+	IPADBG("IPA power manager initialized\n");
+
+	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
+
+	init_completion(&ipa3_ctx->init_completion_obj);
+	init_completion(&ipa3_ctx->uc_loaded_completion_obj);
+
+	result = ipa3_dma_setup();
+	if (result) {
+		IPAERR("Failed to setup IPA DMA\n");
+		result = -ENODEV;
+		goto fail_ipa_dma_setup;
+	}
+
+	/*
+	 * We can't register the GSI driver yet, as it expects
+	 * the GSI FW to be up and running before the registration.
+	 *
+	 * For IPA3.0 and the emulation system, the GSI configuration
+	 * is done by the GSI driver.
+	 *
+	 * For IPA3.1 (and on), the GSI configuration is done by TZ.
+	 */
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
+	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		result = ipa3_gsi_pre_fw_load_init();
+		if (result) {
+			IPAERR("gsi pre FW loading config failed\n");
+			result = -ENODEV;
+			goto fail_gsi_pre_fw_load_init;
+		}
+	}
+
+	cdev = &ipa3_ctx->cdev.cdev;
+	cdev_init(cdev, &ipa3_drv_fops);
+	cdev->owner = THIS_MODULE;
+	cdev->ops = &ipa3_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(cdev, ipa3_ctx->cdev.dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+			MAJOR(ipa3_ctx->cdev.dev_num),
+			MINOR(ipa3_ctx->cdev.dev_num));
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1) {
+		result = ipa_odl_init();
+		if (result) {
+			IPADBG("Error: ODL init fialed\n");
+			result = -ENODEV;
+			goto fail_cdev_add;
+		}
+	}
+
+	/*
+	 * for IPA 4.0 offline charge is not needed and we need to prevent
+	 * power collapse until IPA uC is loaded.
+	 */
+
+	/* proxy vote for modem is added in ipa3_post_init() phase */
+	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
+		ipa3_proxy_clk_unvote();
+
+	/* Create the dummy netdev for LAN RX NAPI */
+	ipa3_enable_napi_netdev();
+
+	ipa3_wwan_init();
+
+	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
+
+	return 0;
+
+fail_cdev_add:
+fail_gsi_pre_fw_load_init:
+	ipa3_dma_shutdown();
+fail_ipa_dma_setup:
+	ipa_pm_destroy();
+fail_ipa_pm_init:
+	wakeup_source_unregister(ipa3_ctx->w_lock);
+	ipa3_ctx->w_lock = NULL;
+fail_w_source_register:
+	device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
+fail_alloc_chrdev_region:
+	idr_destroy(&ipa3_ctx->ipa_idr);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	idr_destroy(&rset->rule_ids);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	idr_destroy(&rset->rule_ids);
+	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
+	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
+	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+fail_create_transport_wq:
+	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+fail_init_hw:
+	gsi_unmap_base();
+fail_gsi_map:
+	if (ipa3_ctx->reg_collection_base)
+		iounmap(ipa3_ctx->reg_collection_base);
+	iounmap(ipa3_ctx->mmio);
+fail_remap:
+	ipa3_disable_clks();
+	ipa3_active_clients_log_destroy();
+fail_init_active_client:
+	if (ipa3_clk)
+		clk_put(ipa3_clk);
+	ipa3_clk = NULL;
+fail_bus_reg:
+	for (i = 0; i < ipa3_ctx->icc_num_paths; i++)
+		if (ipa3_ctx->ctrl->icc_path[i]) {
+			icc_put(ipa3_ctx->ctrl->icc_path[i]);
+			ipa3_ctx->ctrl->icc_path[i] = NULL;
+		}
+fail_init_mem_partition:
+fail_bind:
+	kfree(ipa3_ctx->ctrl);
+fail_mem_ctrl:
+	kfree(ipa3_ctx->ipa_tz_unlock_reg);
+fail_tz_unlock_reg:
+	if (ipa3_ctx->logbuf)
+		ipc_log_context_destroy(ipa3_ctx->logbuf);
+	kfree(ipa3_ctx);
+	ipa3_ctx = NULL;
+fail_mem_ctx:
+	return result;
+}
+
+static int get_ipa_dts_pm_info(struct platform_device *pdev,
+	struct ipa3_plat_drv_res *ipa_drv_res)
+{
+	int result;
+	int i, j;
+
+	/* this interconnects entry must be present */
+	if (!of_find_property(pdev->dev.of_node,
+			"interconnects", NULL)) {
+		IPAERR("No interconnect info\n");
+		return -EFAULT;
+	}
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,interconnect,num-cases",
+		&ipa_drv_res->icc_num_cases);
+	/*
+	 * The no-vote case carries no threshold, and N bandwidth levels
+	 * need only N-1 thresholds, hence num-cases minus 2.
+	 */
+	ipa_drv_res->pm_init.threshold_size =
+		ipa_drv_res->icc_num_cases - 2;
+	if (result || ipa_drv_res->pm_init.threshold_size >
+		IPA_PM_THRESHOLD_MAX) {
+		IPAERR("invalid qcom,interconnect,num-cases %d\n",
+			ipa_drv_res->pm_init.threshold_size);
+		return -EFAULT;
+	}
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,interconnect,num-paths",
+		&ipa_drv_res->icc_num_paths);
+	if (result || ipa_drv_res->icc_num_paths >
+		IPA_ICC_PATH_MAX) {
+		IPAERR("invalid qcom,interconnect,num-paths %d\n",
+			ipa_drv_res->icc_num_paths);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ipa_drv_res->icc_num_paths; i++) {
+		result = of_property_read_string_index(pdev->dev.of_node,
+			"interconnect-names",
+			i,
+			&ipa_drv_res->icc_path_name[i]);
+		if (result) {
+			IPAERR("invalid interconnect-names %d\n", i);
+			return -EFAULT;
+		}
+	}
+	/* read no-vote AB IB value */
+	result = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,no-vote",
+			ipa_drv_res->icc_clk_val[IPA_ICC_NONE],
+			ipa_drv_res->icc_num_paths *
+			IPA_ICC_TYPE_MAX);
+	if (result) {
+		IPAERR("invalid property qcom,no-vote\n");
+		return -EFAULT;
+	}
+
+	/* read svs2 AB IB value */
+	result = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,svs2",
+			ipa_drv_res->icc_clk_val[IPA_ICC_SVS2],
+			ipa_drv_res->icc_num_paths *
+			IPA_ICC_TYPE_MAX);
+	if (result) {
+		IPAERR("invalid property qcom,svs2\n");
+		return -EFAULT;
+	}
+
+	/* read svs AB IB value */
+	result = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,svs",
+			ipa_drv_res->icc_clk_val[IPA_ICC_SVS],
+			ipa_drv_res->icc_num_paths *
+			IPA_ICC_TYPE_MAX);
+	if (result) {
+		IPAERR("invalid property qcom,svs\n");
+		return -EFAULT;
+	}
+
+	/* read nominal AB IB value */
+	result = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,nominal",
+			ipa_drv_res->icc_clk_val[IPA_ICC_NOMINAL],
+			ipa_drv_res->icc_num_paths *
+			IPA_ICC_TYPE_MAX);
+	if (result) {
+		IPAERR("invalid property qcom,nominal\n");
+		return -EFAULT;
+	}
+
+	/* read turbo AB IB value */
+	result = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,turbo",
+			ipa_drv_res->icc_clk_val[IPA_ICC_TURBO],
+			ipa_drv_res->icc_num_paths *
+			IPA_ICC_TYPE_MAX);
+	if (result) {
+		IPAERR("invalid property qcom,turbo\n");
+		return -EFAULT;
+	}
+
+	result = of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,throughput-threshold",
+		ipa_drv_res->pm_init.default_threshold,
+		ipa_drv_res->pm_init.threshold_size);
+	if (result) {
+		IPAERR("failed to read qcom,throughput-thresholds\n");
+		return -EFAULT;
+	}
+
+	result = of_property_count_strings(pdev->dev.of_node,
+		"qcom,scaling-exceptions");
+	if (result < 0) {
+		IPADBG("no exception list for ipa pm\n");
+		result = 0;
+	}
+
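+	/*
+	 * Each exception entry is one usecase string followed by
+	 * threshold_size threshold strings, hence the divisibility
+	 * check below.
+	 */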
+	if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
+		IPAERR("failed to read qcom,scaling-exceptions\n");
+		return -EFAULT;
+	}
+
+	ipa_drv_res->pm_init.exception_size = result /
+		(ipa_drv_res->pm_init.threshold_size + 1);
+	if (ipa_drv_res->pm_init.exception_size >=
+		IPA_PM_EXCEPTION_MAX) {
+		IPAERR("exception list larger then max %d\n",
+			ipa_drv_res->pm_init.exception_size);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
+		struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
+
+		result = of_property_read_string_index(pdev->dev.of_node,
+			"qcom,scaling-exceptions",
+			i * (ipa_drv_res->pm_init.threshold_size + 1),
+			&ex[i].usecase);
+		if (result) {
+			IPAERR("failed to read qcom,scaling-exceptions");
+			return -EFAULT;
+		}
+
+		for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
+			const char *str;
+
+			result = of_property_read_string_index(
+				pdev->dev.of_node,
+				"qcom,scaling-exceptions",
+				i * (ipa_drv_res->pm_init.threshold_size + 1)
+				+ j + 1,
+				&str);
+			if (result) {
+				IPAERR("failed to read qcom,scaling-exceptions"
+					);
+				return -EFAULT;
+			}
+
+			if (kstrtou32(str, 0, &ex[i].threshold[j])) {
+				IPAERR("error str=%s\n", str);
+				return -EFAULT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+		struct ipa3_plat_drv_res *ipa_drv_res)
+{
+	int i, result, pos;
+	struct resource *resource;
+	u32 *ipa_tz_unlock_reg;
+	int elem_num;
+	u32 mhi_evid_limits[2];
+
+	/* initialize ipa3_res */
+	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+	ipa_drv_res->ipa_hw_type = 0;
+	ipa_drv_res->ipa3_hw_mode = 0;
+	ipa_drv_res->platform_type = 0;
+	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+	ipa_drv_res->ipa_wdi2 = false;
+	ipa_drv_res->ipa_wan_skb_page = false;
+	ipa_drv_res->ipa_wdi2_over_gsi = false;
+	ipa_drv_res->ipa_wdi3_over_gsi = false;
+	ipa_drv_res->ipa_mhi_dynamic_config = false;
+	ipa_drv_res->use_64_bit_dma_mask = false;
+	ipa_drv_res->use_bw_vote = false;
+	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->apply_rg10_wa = false;
+	ipa_drv_res->gsi_ch20_wa = false;
+	ipa_drv_res->ipa_tz_unlock_reg_num = 0;
+	ipa_drv_res->ipa_tz_unlock_reg = NULL;
+	ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
+	ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
+	ipa_drv_res->ipa_fltrt_not_hashable = false;
+	ipa_drv_res->ipa_endp_delay_wa = false;
+	ipa_drv_res->skip_ieob_mask_wa = false;
+
+	/* Get IPA HW Version */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+					&ipa_drv_res->ipa_hw_type);
+	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+		IPAERR(":get resource failed for ipa-hw-ver\n");
+		return -ENODEV;
+	}
+	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
+
+	if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR(":IPA version below 3.0 not supported\n");
+		return -ENODEV;
+	}
+
+	if (ipa_drv_res->ipa_hw_type >= IPA_HW_MAX) {
+		IPAERR(":IPA version is greater than the MAX\n");
+		return -ENODEV;
+	}
+
+	/* Get IPA HW mode */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+			&ipa_drv_res->ipa3_hw_mode);
+	if (result)
+		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+	else
+		IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
+				ipa_drv_res->ipa3_hw_mode);
+
+	/* Get Platform Type */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,platform-type",
+			&ipa_drv_res->platform_type);
+	if (result)
+		IPADBG("using default (IPA_PLAT_TYPE_MDM) for platform-type\n");
+	else
+		IPADBG(": found ipa_drv_res->platform_type = %d",
+				ipa_drv_res->platform_type);
+
+	/* Get IPA WAN / LAN RX pool size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-ring-size",
+			&ipa_drv_res->wan_rx_ring_size);
+	if (result)
+		IPADBG("using default for wan-rx-ring-size = %u\n",
+				ipa_drv_res->wan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
+				ipa_drv_res->wan_rx_ring_size);
+
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,lan-rx-ring-size",
+			&ipa_drv_res->lan_rx_ring_size);
+	if (result)
+		IPADBG("using default for lan-rx-ring-size = %u\n",
+			ipa_drv_res->lan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
+			ipa_drv_res->lan_rx_ring_size);
+
+	ipa_drv_res->use_ipa_teth_bridge =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-ipa-tethering-bridge");
+	IPADBG(": using ipa teth bridge = %s",
+		ipa_drv_res->use_ipa_teth_bridge
+		? "True" : "False");
+
+	ipa_drv_res->ipa_mhi_dynamic_config =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-ipa-in-mhi-mode");
+	IPADBG(": ipa_mhi_dynamic_config (%s)\n",
+		ipa_drv_res->ipa_mhi_dynamic_config
+		? "True" : "False");
+
+	ipa_drv_res->modem_cfg_emb_pipe_flt =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,modem-cfg-emb-pipe-flt");
+	IPADBG(": modem configure embedded pipe filtering = %s\n",
+			ipa_drv_res->modem_cfg_emb_pipe_flt
+			? "True" : "False");
+	ipa_drv_res->ipa_wdi2_over_gsi =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi2_over_gsi");
+	IPADBG(": WDI-2.0 over gsi= %s\n",
+			ipa_drv_res->ipa_wdi2_over_gsi
+			? "True" : "False");
+	ipa_drv_res->ipa_endp_delay_wa =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-endp-delay-wa");
+	IPADBG(": endppoint delay wa = %s\n",
+			ipa_drv_res->ipa_endp_delay_wa
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wdi3_over_gsi =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi3-over-gsi");
+	IPADBG(": WDI-3.0 over gsi= %s\n",
+			ipa_drv_res->ipa_wdi3_over_gsi
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wdi2 =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi2");
+	IPADBG(": WDI-2.0 = %s\n",
+			ipa_drv_res->ipa_wdi2
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wan_skb_page =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,wan-use-skb-page");
+	IPADBG(": Use skb page = %s\n",
+			ipa_drv_res->ipa_wan_skb_page
+			? "True" : "False");
+
+	ipa_drv_res->ipa_fltrt_not_hashable =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-fltrt-not-hashable");
+	IPADBG(": IPA filter/route rule hashable = %s\n",
+			ipa_drv_res->ipa_fltrt_not_hashable
+			? "True" : "False");
+
+	ipa_drv_res->use_64_bit_dma_mask =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-64-bit-dma-mask");
+	IPADBG(": use_64_bit_dma_mask = %s\n",
+			ipa_drv_res->use_64_bit_dma_mask
+			? "True" : "False");
+
+	ipa_drv_res->use_bw_vote =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,bandwidth-vote-for-ipa");
+	IPADBG(": use_bw_vote = %s\n",
+			ipa_drv_res->use_bw_vote
+			? "True" : "False");
+	ipa_drv_res->skip_ieob_mask_wa =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,skip-ieob-mask-wa");
+	IPADBG(": skip ieob mask wa = %s\n",
+			ipa_drv_res->skip_ieob_mask_wa
+			? "True" : "False");
+
+	ipa_drv_res->skip_uc_pipe_reset =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,skip-uc-pipe-reset");
+	IPADBG(": skip uC pipe reset = %s\n",
+		ipa_drv_res->skip_uc_pipe_reset
+		? "True" : "False");
+
+	ipa_drv_res->tethered_flow_control =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,tethered-flow-control");
+	IPADBG(": Use apps based flow control = %s\n",
+		ipa_drv_res->tethered_flow_control
+		? "True" : "False");
+
+	ipa_drv_res->lan_rx_napi_enable =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,lan-rx-napi");
+	IPADBG(": Enable LAN rx NAPI = %s\n",
+		ipa_drv_res->lan_rx_napi_enable
+		? "True" : "False");
+
+	/* Get IPA wrapper address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-base");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_mem_base = resource->start;
+	ipa_drv_res->ipa_mem_size = resource_size(resource);
+	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->ipa_mem_base,
+			ipa_drv_res->ipa_mem_size);
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	/* Get IPA GSI address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"gsi-base");
+	if (!resource) {
+		IPAERR(":get resource failed for gsi-base\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->transport_mem_base = resource->start;
+	ipa_drv_res->transport_mem_size = resource_size(resource);
+	IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->transport_mem_base,
+			ipa_drv_res->transport_mem_size);
+
+	/* Get IPA GSI IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"gsi-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for gsi-irq\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->transport_irq = resource->start;
+	IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
+
+	/* Get IPA pipe mem start ofst */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+	if (!resource) {
+		IPADBG(":not using pipe memory - resource nonexisting\n");
+	} else {
+		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+				ipa_drv_res->ipa_pipe_mem_start_ofst,
+				ipa_drv_res->ipa_pipe_mem_size);
+	}
+
+	/* Get IPA IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"ipa-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-irq\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_irq = resource->start;
+	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+			&ipa_drv_res->ee);
+	if (result)
+		ipa_drv_res->ee = 0;
+	IPADBG(":ee = %u\n", ipa_drv_res->ee);
+
+	ipa_drv_res->apply_rg10_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-rg10-limitation-mitigation");
+	IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
+		ipa_drv_res->apply_rg10_wa
+		? "True" : "False");
+
+	ipa_drv_res->gsi_ch20_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,do-not-use-ch-gsi-20");
+	IPADBG(": GSI CH 20 WA is = %s\n",
+		ipa_drv_res->gsi_ch20_wa
+		? "Needed" : "Not needed");
+
+	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+		"qcom,mhi-event-ring-id-limits", sizeof(u32));
+
+	if (elem_num == 2) {
+		if (of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
+			IPAERR("failed to read mhi event ring id limits\n");
+			return -EFAULT;
+		}
+		if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
+			IPAERR("mhi event ring id low limit > high limit\n");
+			return -EFAULT;
+		}
+		ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
+		ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
+		IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
+			mhi_evid_limits[0], mhi_evid_limits[1]);
+	} else {
+		if (elem_num > 0) {
+			IPAERR("Invalid mhi event ring id limits number %d\n",
+				elem_num);
+			return -EINVAL;
+		}
+		IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
+			ipa_drv_res->mhi_evid_limits[0],
+			ipa_drv_res->mhi_evid_limits[1]);
+	}
+
+	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+		"qcom,ipa-tz-unlock-reg", sizeof(u32));
+
+	if (elem_num > 0 && elem_num % 2 == 0) {
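+		/* the property is a flat list of (addr, size) u32 pairs */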
+		ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
+
+		ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
+		if (ipa_tz_unlock_reg == NULL)
+			return -ENOMEM;
+
+		ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
+			ipa_drv_res->ipa_tz_unlock_reg_num,
+			sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
+			kfree(ipa_tz_unlock_reg);
+			return -ENOMEM;
+		}
+
+		if (of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
+			elem_num)) {
+			IPAERR("failed to read register addresses\n");
+			kfree(ipa_tz_unlock_reg);
+			kfree(ipa_drv_res->ipa_tz_unlock_reg);
+			return -EFAULT;
+		}
+
+		pos = 0;
+		for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
+			ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
+				ipa_tz_unlock_reg[pos++];
+			ipa_drv_res->ipa_tz_unlock_reg[i].size =
+				ipa_tz_unlock_reg[pos++];
+			IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i,
+				&ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
+				ipa_drv_res->ipa_tz_unlock_reg[i].size);
+		}
+		kfree(ipa_tz_unlock_reg);
+	}
+
+	/* get IPA PM related information */
+	result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
+	if (result) {
+		IPAERR("failed to get pm info from dts %d\n", result);
+		return result;
+	}
+
+	ipa_drv_res->wdi_over_pcie =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,wlan-ce-db-over-pcie");
+	IPADBG("Is wdi_over_pcie ? (%s)\n",
+		ipa_drv_res->wdi_over_pcie ? "Yes":"No");
+
+	/*
+	 * If we're on emulator, get its interrupt controller's mem
+	 * start and size
+	 */
+	if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		resource = platform_get_resource_byname(
+		    pdev, IORESOURCE_MEM, "intctrl-base");
+		if (!resource) {
+			IPAERR(":Can't find intctrl-base resource\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->emulator_intcntrlr_mem_base =
+		    resource->start;
+		ipa_drv_res->emulator_intcntrlr_mem_size =
+		    resource_size(resource);
+		IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
+		       ipa_drv_res->emulator_intcntrlr_mem_base,
+		       ipa_drv_res->emulator_intcntrlr_mem_size);
+	}
+
+	ipa_drv_res->entire_ipa_block_size = 0x100000;
+	result = of_property_read_u32(pdev->dev.of_node,
+				      "qcom,entire-ipa-block-size",
+				      &ipa_drv_res->entire_ipa_block_size);
+	IPADBG(": entire_ipa_block_size = %d\n",
+	       ipa_drv_res->entire_ipa_block_size);
+
+	/*
+	 * We'll read register-collection-on-crash here, but log it
+	 * later below because its value may change based on other
+	 * subsequent dtsi reads.
+	 */
+	ipa_drv_res->do_register_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,register-collection-on-crash");
+	/*
+	 * We'll read testbus-collection-on-crash here...
+	 */
+	ipa_drv_res->do_testbus_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,testbus-collection-on-crash");
+	IPADBG(": doing testbus collection on crash = %u\n",
+	       ipa_drv_res->do_testbus_collection_on_crash);
+
+	/*
+	 * We'll read non-tn-collection-on-crash here...
+	 */
+	ipa_drv_res->do_non_tn_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,non-tn-collection-on-crash");
+	IPADBG(": doing non-tn collection on crash = %u\n",
+	       ipa_drv_res->do_non_tn_collection_on_crash);
+
+	/*
+	 * We'll read ram-collection-on-crash here...
+	 */
+	ipa_drv_res->do_ram_collection_on_crash =
+		of_property_read_bool(
+			pdev->dev.of_node,
+			"qcom,ram-collection-on-crash");
+	IPADBG(": doing ram collection on crash = %u\n",
+		   ipa_drv_res->do_ram_collection_on_crash);
+
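+	/* any of the more specific collections implies register collection */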
+	if (ipa_drv_res->do_testbus_collection_on_crash ||
+		ipa_drv_res->do_non_tn_collection_on_crash ||
+		ipa_drv_res->do_ram_collection_on_crash)
+		ipa_drv_res->do_register_collection_on_crash = true;
+
+	IPADBG(": doing register collection on crash = %u\n",
+	       ipa_drv_res->do_register_collection_on_crash);
+
+	result = of_property_read_u32(
+		pdev->dev.of_node,
+		"qcom,secure-debug-check-action",
+		&ipa_drv_res->secure_debug_check_action);
+	if (result ||
+		(ipa_drv_res->secure_debug_check_action != 0 &&
+		 ipa_drv_res->secure_debug_check_action != 1 &&
+		 ipa_drv_res->secure_debug_check_action != 2))
+		ipa_drv_res->secure_debug_check_action = USE_SCM;
+
+	IPADBG(": secure-debug-check-action = %d\n",
+		   ipa_drv_res->secure_debug_check_action);
+
+	return 0;
+}
+
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+	int fast = 0;
+	int bypass = 0;
+	u32 add_map_size;
+	const u32 *add_map;
+	int i;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("WLAN CB PROBE dev=%pK\n", dev);
+
+	if (!smmu_info.present[IPA_SMMU_CB_WLAN]) {
+		IPAERR("WLAN SMMU is disabled\n");
+		return 0;
+	}
+
+	IPADBG("WLAN CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
+
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		return -EINVAL;
+	}
+
+	IPADBG("WLAN CB PROBE mapping retrieved\n");
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+							"dma-coherent");
+	cb->dev   = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end  = cb->va_size = 0;
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-dma-addr-pool",
+			iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size  = iova_ap_mapping[1];
+		cb->va_end   = cb->va_start + cb->va_size;
+	}
+
+	IPADBG("WLAN CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
+		   dev, cb->va_start, cb->va_size);
+
+	/*
+	 * Previously, these attributes were set here from dtsi values
+	 * owned by the IPA driver. The IOMMU-owned dtsi settings are now
+	 * authoritative, so the logic below merely queries whether the
+	 * attributes were set there and acts accordingly.
+	 */
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+
+	IPADBG(
+	  "WLAN CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+	  dev, bypass, fast);
+
+	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = (bypass != 0);
+
+	/* MAP ipa-uc ram */
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the mapping is an array of u32 3-tuples: <iova pa size> */
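+		/*
+		 * Hypothetical dtsi example -- the values here are
+		 * illustrative only:
+		 *
+		 *	qcom,additional-mapping =
+		 *		<0x10000000 0x10000000 0x2000>;
+		 */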
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu_domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		}
+	}
+
+	return 0;
+}
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+	int bypass = 0;
+	int fast = 0;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("UC CB PROBE dev=%pK\n", dev);
+
+	if (!smmu_info.present[IPA_SMMU_CB_UC]) {
+		IPAERR("UC SMMU is disabled\n");
+		return 0;
+	}
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	IPADBG("UC CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
+
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		return -EINVAL;
+	}
+
+	IPADBG("UC CB PROBE mapping retrieved\n");
+
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
+	cb->dev   = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end  = cb->va_size = 0;
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-dma-addr-pool",
+			iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size  = iova_ap_mapping[1];
+		cb->va_end   = cb->va_start + cb->va_size;
+	}
+
+	IPADBG("UC CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
+		   dev, cb->va_start, cb->va_size);
+
+	/*
+	 * Previously, these attributes were set here from dtsi values
+	 * owned by the IPA driver. The IOMMU-owned dtsi settings are now
+	 * authoritative, so the logic below merely queries whether the
+	 * attributes were set there and acts accordingly.
+	 */
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+
+	IPADBG("UC CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+		   dev, bypass, fast);
+
+	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);
+
+	ipa3_ctx->uc_pdev = dev;
+
+	return 0;
+}
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int fast = 0;
+	int bypass = 0;
+	u32 add_map_size;
+	const u32 *add_map;
+	void *smem_addr;
+	size_t smem_size;
+	u32 ipa_smem_size = 0;
+	int ret;
+	int i;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	phys_addr_t iova;
+	phys_addr_t pa;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("AP CB PROBE dev=%pK\n", dev);
+
+	if (!smmu_info.present[IPA_SMMU_CB_AP]) {
+		IPAERR("AP SMMU is disabled\n");
+		return 0;
+	}
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	IPADBG("AP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
+
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		return -EINVAL;
+	}
+
+	IPADBG("AP CB PROBE mapping retrieved\n");
+
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
+	cb->dev   = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end  = cb->va_size = 0;
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-dma-addr-pool",
+			iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size  = iova_ap_mapping[1];
+		cb->va_end   = cb->va_start + cb->va_size;
+	}
+
+	IPADBG("AP CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
+		   dev, cb->va_start, cb->va_size);
+
+	/*
+	 * Previously, these attributes were set here from dtsi values
+	 * owned by the IPA driver. The IOMMU-owned dtsi settings are now
+	 * authoritative, so the logic below merely queries whether the
+	 * attributes were set there and acts accordingly.
+	 */
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+
+	IPADBG("AP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+		   dev, bypass, fast);
+
+	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = (bypass != 0);
+
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the mapping is an array of u32 3-tuples: <iova pa size> */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu_domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		}
+	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,ipa-q6-smem-size",
+					&ipa_smem_size);
+	if (ret) {
+		IPADBG("ipa q6 smem size (default) = %u\n", IPA_SMEM_SIZE);
+		ipa_smem_size = IPA_SMEM_SIZE;
+	} else {
+		IPADBG("ipa q6 smem size = %u\n", ipa_smem_size);
+	}
+
+	if (ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ) {
+		/* map SMEM memory for IPA table accesses */
+		ret = qcom_smem_alloc(SMEM_MODEM,
+			SMEM_IPA_FILTER_TABLE,
+			ipa_smem_size);
+
+		if (ret < 0 && ret != -EEXIST) {
+			IPAERR("unable to allocate smem MODEM entry\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+		smem_addr = qcom_smem_get(SMEM_MODEM,
+			SMEM_IPA_FILTER_TABLE,
+			&smem_size);
+		if (IS_ERR(smem_addr)) {
+			IPAERR("unable to acquire smem MODEM entry\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+		if (smem_size != ipa_smem_size)
+			IPAERR("unexpected q6 smem size %zu (expected %u)\n",
+				smem_size, ipa_smem_size);
+
+		iova = qcom_smem_virt_to_phys(smem_addr);
+		pa = iova;
+
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, ipa_smem_size,
+				iova_p, pa_p, size_p);
+		IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+		ipa3_iommu_map(cb->iommu_domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE);
+	}
+
+	smmu_info.present[IPA_SMMU_CB_AP] = true;
+
+	ipa3_ctx->pdev = dev;
+	cb->next_addr = cb->va_end;
+
+	return 0;
+}
+
+static int ipa_smmu_11ad_cb_probe(struct device *dev)
+{
+	int bypass = 0;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
+	u32 iova_ap_mapping[2];
+
+	IPADBG("11AD CB probe: dev=%pK\n", dev);
+
+	if (!smmu_info.present[IPA_SMMU_CB_11AD]) {
+		IPAERR("11AD SMMU is disabled\n");
+		return 0;
+	}
+
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		return -EINVAL;
+	}
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+							"dma-coherent");
+	cb->dev   = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end  = cb->va_size = 0;
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-dma-addr-pool",
+			iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size  = iova_ap_mapping[1];
+		cb->va_end   = cb->va_start + cb->va_size;
+	}
+
+	IPADBG("11AD CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
+		   dev, cb->va_start, cb->va_size);
+
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+
+	IPADBG("11AD CB PROBE dev=%pK DOMAIN ATTRS bypass=%d\n",
+		   dev, bypass);
+
+	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] = (bypass != 0);
+
+	if (of_property_read_bool(dev->of_node, "qcom,shared-cb")) {
+		IPADBG("11AD using shared CB\n");
+		cb->shared = true;
+	}
+
+	return 0;
+}
+
+static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
+{
+	switch (cb_type) {
+	case IPA_SMMU_CB_AP:
+		return ipa_smmu_ap_cb_probe(dev);
+	case IPA_SMMU_CB_WLAN:
+		return ipa_smmu_wlan_cb_probe(dev);
+	case IPA_SMMU_CB_UC:
+		return ipa_smmu_uc_cb_probe(dev);
+	case IPA_SMMU_CB_11AD:
+		return ipa_smmu_11ad_cb_probe(dev);
+	case IPA_SMMU_CB_MAX:
+		IPAERR("Invalid cb_type\n");
+	}
+	return 0;
+}
+
+static int ipa3_attach_to_smmu(void)
+{
+	struct ipa_smmu_cb_ctx *cb;
+	int i, result;
+
+	ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
+	ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
+
+	if (smmu_info.arm_smmu) {
+		IPADBG("smmu is enabled\n");
+		for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
+			cb = ipa3_get_smmu_ctx(i);
+			result = ipa_smmu_cb_probe(cb->dev, i);
+			if (result)
+				IPAERR("probe failed for cb %d\n", i);
+		}
+	} else {
+		IPADBG("smmu is disabled\n");
+	}
+	return 0;
+}
+
+static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
+{
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	return IRQ_HANDLED;
+}
+
+static int ipa3_smp2p_probe(struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+	int res;
+	int irq = 0;
+
+	if (ipa3_ctx == NULL) {
+		IPAERR("ipa3_ctx was not initialized\n");
+		return -EPROBE_DEFER;
+	}
+	IPADBG("node->name=%s\n", node->name);
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		IPADBG("Ignore smp2p on APQ platform\n");
+		return 0;
+	}
+
+	if (strcmp("qcom,smp2p_map_ipa_1_out", node->name) == 0) {
+		if (of_find_property(node, "qcom,smem-states", NULL)) {
+			ipa3_ctx->smp2p_info.smem_state =
+			qcom_smem_state_get(dev, "ipa-smp2p-out",
+			&ipa3_ctx->smp2p_info.smem_bit);
+			if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) {
+				IPAERR("fail to get smp2p clk resp bit %ld\n",
+				PTR_ERR(ipa3_ctx->smp2p_info.smem_state));
+				return PTR_ERR(ipa3_ctx->smp2p_info.smem_state);
+			}
+			IPADBG("smem_bit=%d\n", ipa3_ctx->smp2p_info.smem_bit);
+		}
+	} else if (strcmp("qcom,smp2p_map_ipa_1_in", node->name) == 0) {
+		res = irq = of_irq_get_byname(node, "ipa-smp2p-in");
+		if (res < 0) {
+			IPADBG("of_irq_get_byname returned %d\n", irq);
+			return res;
+		}
+
+		ipa3_ctx->smp2p_info.in_base_id = irq;
+		IPADBG("smp2p irq#=%d\n", irq);
+		res = devm_request_threaded_irq(dev, irq, NULL,
+			(irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
+			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			"ipa_smp2p_clk_vote", dev);
+		if (res) {
+			IPAERR("fail to register smp2p irq=%d\n", irq);
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static void ipa_smmu_update_fw_loader(void)
+{
+	int i;
+
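+	/*
+	 * Signal the FW-load state machine only once every context bank
+	 * has probed; bail out at the first one that has not.
+	 */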
+	if (smmu_info.arm_smmu) {
+		IPADBG("smmu is enabled\n");
+		for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
+			if (!smmu_info.present[i]) {
+				IPADBG("CB %d not probed yet\n", i);
+				break;
+			}
+		}
+		if (i == IPA_SMMU_CB_MAX) {
+			IPADBG("All %d CBs probed\n", IPA_SMMU_CB_MAX);
+			ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
+		}
+	} else {
+		IPADBG("smmu is disabled\n");
+	}
+}
+
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct device *dev = &pdev_p->dev;
+	struct ipa_smmu_cb_ctx *cb;
+
+	IPADBG("IPA driver probing started\n");
+	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
+
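+	/*
+	 * The SMMU context-bank sub-devices probe through this same entry
+	 * point: record their presence, kick the FW-load state machine,
+	 * and return early.
+	 */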
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) {
+		if (ipa3_ctx == NULL) {
+			IPAERR("ipa3_ctx was not initialized\n");
+			return -EPROBE_DEFER;
+		}
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+		cb->dev = dev;
+		smmu_info.present[IPA_SMMU_CB_AP] = true;
+		ipa_smmu_update_fw_loader();
+
+		return 0;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) {
+		if (ipa3_ctx == NULL) {
+			IPAERR("ipa3_ctx was not initialized\n");
+			return -EPROBE_DEFER;
+		}
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+		cb->dev = dev;
+		smmu_info.present[IPA_SMMU_CB_WLAN] = true;
+		ipa_smmu_update_fw_loader();
+
+		return 0;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) {
+		if (ipa3_ctx == NULL) {
+			IPAERR("ipa3_ctx was not initialized\n");
+			return -EPROBE_DEFER;
+		}
+		cb =  ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+		cb->dev = dev;
+		smmu_info.present[IPA_SMMU_CB_UC] = true;
+		ipa_smmu_update_fw_loader();
+
+		return 0;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-11ad-cb")) {
+		if (ipa3_ctx == NULL) {
+			IPAERR("ipa3_ctx was not initialized\n");
+			return -EPROBE_DEFER;
+		}
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
+		cb->dev = dev;
+		smmu_info.present[IPA_SMMU_CB_11AD] = true;
+		ipa_smmu_update_fw_loader();
+
+		return 0;
+	}
+
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2p-map-ipa-1-out"))
+		return ipa3_smp2p_probe(dev);
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2p-map-ipa-1-in"))
+		return ipa3_smp2p_probe(dev);
+
+	result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
+	if (result) {
+		IPAERR("IPA dts parsing failed\n");
+		return result;
+	}
+
+	result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
+	if (result) {
+		IPAERR("IPA API binding failed\n");
+		return result;
+	}
+
+	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask"))
+			smmu_info.use_64_bit_dma_mask = true;
+		smmu_info.arm_smmu = true;
+	} else {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask")) {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(64))) {
+				IPAERR("DMA set 64bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(32))) {
+				IPAERR("DMA set 32bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		}
+		ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
+	}
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, pdev_p);
+	if (result) {
+		IPAERR("ipa3_pre_init failed\n");
+		return result;
+	}
+
+	result = of_platform_populate(pdev_p->dev.of_node,
+		pdrv_match, NULL, &pdev_p->dev);
+	if (result) {
+		IPAERR("failed to populate platform\n");
+		return result;
+	}
+
+	return result;
+}
+
+/**
+ * ipa3_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, usually when the system enters suspend.
+ *
+ * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+ */
+int ipa3_ap_suspend(struct device *dev)
+{
+	int i;
+
+	IPADBG("Enter...\n");
+
+	/* In case there is a tx/rx handler in polling mode fail to suspend */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (ipa3_ctx->ep[i].sys &&
+			atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
+			IPAERR("EP %d is in polling state, do not suspend\n",
+				i);
+			return -EAGAIN;
+		}
+	}
+
+	ipa_pm_deactivate_all_deferred();
+
+	IPADBG("Exit\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Always returns 0 since resume should always succeed.
+ */
+int ipa3_ap_resume(struct device *dev)
+{
+	return 0;
+}
+
+struct ipa3_context *ipa3_get_ctx(void)
+{
+	return ipa3_ctx;
+}
+
+bool ipa3_get_lan_rx_napi(void)
+{
+	return ipa3_ctx->lan_rx_napi_enable;
+}
+
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
+{
+	/*
+	 * These values are reported by hardware. Any error indicates an
+	 * unexpected hardware state.
+	 */
+	switch (notify->evt_id) {
+	case GSI_PER_EVT_GLOB_ERROR:
+		IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
+		IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
+		break;
+	case GSI_PER_EVT_GLOB_GP1:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
+		ipa_assert();
+		break;
+	case GSI_PER_EVT_GLOB_GP2:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
+		ipa_assert();
+		break;
+	case GSI_PER_EVT_GLOB_GP3:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
+		ipa_assert();
+		break;
+	case GSI_PER_EVT_GENERAL_BREAK_POINT:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
+		break;
+	case GSI_PER_EVT_GENERAL_BUS_ERROR:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
+		ipa_assert();
+		break;
+	case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
+		ipa_assert();
+		break;
+	case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
+		ipa_assert();
+		break;
+	default:
+		IPAERR("Received unexpected evt: %d\n",
+			notify->evt_id);
+		ipa_assert();
+	}
+}
+
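+/*
+ * ipa3_register_ipa_ready_cb() - defer a client callback until IPA init
+ * completes. Illustrative usage sketch -- my_ready_cb and my_ctx are
+ * hypothetical client code, not part of this driver:
+ *
+ *	ret = ipa3_register_ipa_ready_cb(my_ready_cb, my_ctx);
+ *	if (ret == -EEXIST)
+ *		my_ready_cb(my_ctx);	(IPA already finished init)
+ */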
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	struct ipa3_ready_cb_info *cb_info = NULL;
+
+	/* check whether ipa3_ctx has been initialized */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver hasn't initialized yet\n");
+		return -ENXIO;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ipa_initialization_complete) {
+		mutex_unlock(&ipa3_ctx->lock);
+		IPADBG("IPA driver finished initialization already\n");
+		return -EEXIST;
+	}
+
+	cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
+	if (!cb_info) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return -ENOMEM;
+	}
+
+	cb_info->ready_cb = ipa_ready_cb;
+	cb_info->user_data = user_data;
+
+	list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+int ipa3_iommu_map(struct iommu_domain *domain,
+	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+	struct ipa_smmu_cb_ctx *cb = NULL;
+
+	IPADBG_LOW("domain =0x%pK iova 0x%lx\n", domain, iova);
+	IPADBG_LOW("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+	/* make sure there is no overlap */
+	if (domain == ipa3_get_smmu_domain()) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+		if (iova >= cb->va_start && iova < cb->va_end) {
+			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else if (domain == ipa3_get_wlan_smmu_domain()) {
+		/* wlan is one time map */
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+	} else if (domain == ipa3_get_11ad_smmu_domain()) {
+		/* 11ad is one time map */
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
+	} else if (domain == ipa3_get_uc_smmu_domain()) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+		if (iova >= cb->va_start && iova < cb->va_end) {
+			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else {
+		IPAERR("Unexpected domain 0x%pK\n", domain);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	if (cb == NULL) {
+		IPAERR("Unexpected NULL cb for domain 0x%pK\n", domain);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/*
+	 * IOMMU_CACHE is needed to make the entries cacheable
+	 * if cache coherency is enabled in dtsi.
+	 */
+	if (cb->is_cache_coherent)
+		prot |= IOMMU_CACHE;
+
+	return iommu_map(domain, iova, paddr, size, prot);
+}
+
+/**
+ * ipa3_get_smmu_params()- Return the ipa3 smmu related params.
+ */
+int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
+	struct ipa_smmu_out_params *out)
+{
+	bool is_smmu_enable = false;
+
+	if (out == NULL || in == NULL) {
+		IPAERR("bad params for client SMMU out params\n");
+		return -EINVAL;
+	}
+
+	if (!ipa3_ctx) {
+		IPAERR("IPA not yet initialized\n");
+		return -EINVAL;
+	}
+
+	out->shared_cb = false;
+
+	switch (in->smmu_client) {
+	case IPA_SMMU_WLAN_CLIENT:
+		if (ipa3_ctx->ipa_wdi3_over_gsi ||
+			ipa3_ctx->ipa_wdi2_over_gsi)
+			is_smmu_enable =
+				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] ||
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		else
+			is_smmu_enable =
+			!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
+			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		break;
+	case IPA_SMMU_WIGIG_CLIENT:
+		is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
+			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
+			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
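+		/*
+		 * The uC, 11AD and AP context banks must agree: either all
+		 * bypass S1 or none do. A mixed configuration is a
+		 * discrepancy.
+		 */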
+		if (is_smmu_enable) {
+			if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+				IPAERR("11AD SMMU Discrepancy (%d %d %d)\n",
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC],
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP],
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD]);
+				WARN_ON(1);
+				return -EINVAL;
+			}
+		} else {
+			if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] ||
+				!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD] ||
+				!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+				IPAERR("11AD SMMU Discrepancy (%d %d %d)\n",
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC],
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP],
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD]);
+				WARN_ON(1);
+				return -EINVAL;
+			}
+		}
+		out->shared_cb = (ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD))->shared;
+		break;
+	case IPA_SMMU_AP_CLIENT:
+		is_smmu_enable =
+			!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
+		break;
+	default:
+		is_smmu_enable = false;
+		IPAERR("Trying to get SMMU status for an illegal client\n");
+		return -EINVAL;
+	}
+
+	out->smmu_enable = is_smmu_enable;
+
+	return 0;
+}
+
+#define MAX_LEN 96
+
+void ipa_pc_qmp_enable(void)
+{
+	char buf[MAX_LEN] = "{class: bcm, res: ipa_pc, val: 1}";
+	struct qmp_pkt pkt;
+	int ret = 0;
+	struct ipa3_pc_mbox_data *mbox_data = &ipa3_ctx->pc_mbox;
+
+	IPADBG("Enter\n");
+
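+	/*
+	 * buf holds the key/value QMP string for the AOP; this one votes
+	 * the ipa_pc bcm resource on (val: 1).
+	 */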
+	/* prepare the mailbox struct */
+	mbox_data->mbox_client.dev = &ipa3_ctx->master_pdev->dev;
+	mbox_data->mbox_client.tx_block = true;
+	mbox_data->mbox_client.tx_tout = MBOX_TOUT_MS;
+	mbox_data->mbox_client.knows_txdone = false;
+
+	mbox_data->mbox = mbox_request_channel(&mbox_data->mbox_client, 0);
+	if (IS_ERR(mbox_data->mbox)) {
+		ret = PTR_ERR(mbox_data->mbox);
+		if (ret != -EPROBE_DEFER)
+			IPAERR("mailbox channel request failed, ret=%d\n", ret);
+
+		return;
+	}
+
+	/* prepare the QMP packet to send */
+	pkt.size = MAX_LEN;
+	pkt.data = buf;
+
+	/* send the QMP packet to AOP */
+	ret = mbox_send_message(mbox_data->mbox, &pkt);
+	if (ret < 0)
+		IPAERR("qmp message send failed, ret=%d\n", ret);
+
+	if (mbox_data->mbox) {
+		mbox_free_channel(mbox_data->mbox);
+		mbox_data->mbox = NULL;
+	}
+}
+
+/**************************************************************
+ *            PCIe Version
+ *************************************************************/
+
+int ipa3_pci_drv_probe(
+	struct pci_dev            *pci_dev,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct ipa3_plat_drv_res *ipa_drv_res;
+	u32 bar0_offset;
+	u32 mem_start;
+	u32 mem_end;
+	uint32_t bits;
+	uint32_t ipa_start, gsi_start, intctrl_start;
+	struct device *dev;
+	static struct platform_device platform_dev;
+
+	if (!pci_dev || !api_ctrl || !pdrv_match) {
+		IPAERR(
+		    "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
+		    pci_dev, api_ctrl, pdrv_match);
+		return -EOPNOTSUPP;
+	}
+
+	dev = &(pci_dev->dev);
+
+	IPADBG("IPA PCI driver probing started\n");
+
+	/*
+	 * Follow PCI driver flow here.
+	 * pci_enable_device:  Enables device and assigns resources
+	 * pci_request_region:  Makes BAR0 address region usable
+	 */
+	result = pci_enable_device(pci_dev);
+	if (result < 0) {
+		IPAERR("pci_enable_device() failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	result = pci_request_region(pci_dev, 0, "IPA Memory");
+	if (result < 0) {
+		IPAERR("pci_request_region() failed\n");
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * In the PCI/emulation environment, &platform_dev is passed to
+	 * get_ipa_dts_configuration() but goes unused, since every use of
+	 * it in that function is replaced by the preprocessor with
+	 * definitions from ipa_emulation_stubs.h. Passing &platform_dev
+	 * keeps code-validity tools happy.
+	 */
+	if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
+		IPAERR("get_ipa_dts_configuration() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	ipa_drv_res = &ipa3_res;
+
+	result =
+		of_property_read_u32(NULL, "emulator-bar0-offset",
+				     &bar0_offset);
+	if (result) {
+		IPAERR(":get resource failed for emulator-bar0-offset!\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -ENODEV;
+	}
+	IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
+
+	ipa_start     = ipa_drv_res->ipa_mem_base;
+	gsi_start     = ipa_drv_res->transport_mem_base;
+	intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
+
+	/*
+	 * Which IRQ will we be interrupted on?
+	 */
+	ipa_drv_res->emulator_irq = pci_dev->irq;
+	IPADBG(
+	    "EMULATION PCI_INTERRUPT_PIN(%u)\n",
+	    ipa_drv_res->emulator_irq);
+
+	/*
+	 * Set the ipa_mem_base to the PCI base address of BAR0
+	 */
+	mem_start = pci_resource_start(pci_dev, 0);
+	mem_end   = pci_resource_end(pci_dev, 0);
+
+	IPADBG("PCI START                = 0x%x\n", mem_start);
+	IPADBG("PCI END                  = 0x%x\n", mem_end);
+
+	ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
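+	/*
+	 * GSI and the emulated interrupt controller keep their original
+	 * offsets relative to the IPA block, rebased onto BAR0.
+	 */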
+	ipa_drv_res->transport_mem_base =
+	    ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
+
+	ipa_drv_res->emulator_intcntrlr_mem_base =
+	    ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
+
+	IPADBG("ipa_mem_base                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_base);
+	IPADBG("ipa_mem_size                = 0x%x\n",
+	       ipa_drv_res->ipa_mem_size);
+
+	IPADBG("transport_mem_base          = 0x%x\n",
+	       ipa_drv_res->transport_mem_base);
+	IPADBG("transport_mem_size          = 0x%x\n",
+	       ipa_drv_res->transport_mem_size);
+
+	IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_base);
+	IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
+	       ipa_drv_res->emulator_intcntrlr_mem_size);
+
+	result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
+	if (result != 0) {
+		IPAERR("ipa3_bind_api_controller() failed\n");
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
+		IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return -EOPNOTSUPP;
+	}
+
+	pci_set_master(pci_dev);
+
+	memset(&platform_dev, 0, sizeof(platform_dev));
+	platform_dev.dev = *dev;
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, &platform_dev);
+	if (result) {
+		IPAERR("ipa3_pre_init failed\n");
+		pci_clear_master(pci_dev);
+		pci_release_region(pci_dev, 0);
+		pci_disable_device(pci_dev);
+		return result;
+	}
+
+	return result;
+}
+
+/*
+ * Return the transport register memory base address and size.
+ */
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr)
+{
+	if (!phys_addr_ptr || !size_ptr) {
+		IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
+		       phys_addr_ptr, size_ptr);
+		return -EINVAL;
+	}
+
+	*phys_addr_ptr = ipa3_res.transport_mem_base;
+	*size_ptr      = ipa3_res.transport_mem_size;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_get_transport_info);
+
+static uint emulation_type = IPA_HW_v4_0;
+
+/*
+ * Return the emulation type.
+ */
+uint ipa3_get_emulation_type(void)
+{
+	return emulation_type;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
+/*
+ * Module parameter. Invoke as follows:
+ *     insmod ipat.ko emulation_type=[13|14|17|...|N]
+ * Examples:
+ *   insmod ipat.ko emulation_type=13 # for IPA 3.5.1
+ *   insmod ipat.ko emulation_type=14 # for IPA 4.0
+ *   insmod ipat.ko emulation_type=17 # for IPA 4.5
+ *
+ * NOTE: The emulation_type values need to come from: enum ipa_hw_type
+ *
+ */
+
+module_param(emulation_type, uint, 0000);
+MODULE_PARM_DESC(
+	emulation_type,
+	"emulation_type=N N can be 13 for IPA 3.5.1, 14 for IPA 4.0, 17 for IPA 4.5");

+ 1849 - 0
ipa/ipa_v3/ipa_client.c

@@ -0,0 +1,1849 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+#include "linux/msm_gsi.h"
+
+/*
+ * These values were determined empirically and show good end-to-end,
+ * bi-directional throughput.
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3
+#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+/* xfer_rsc_idx should be 7 bits */
+#define IPA_XFER_RSC_IDX_MAX 127
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty);
+static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl);
+
+int ipa3_enable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int res = 0;
+	struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
+
+	/* Assign the resource group for pipe */
+	memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+	rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+	if (rsrc_grp.rsrc_grp == -1) {
+		IPAERR("invalid group for client %d\n", ep->client);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("Setting group %d for pipe %d\n",
+		rsrc_grp.rsrc_grp, clnt_hdl);
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+		&rsrc_grp);
+
+	IPADBG("Enabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		/*
+		 * Set HOLB on USB DPL CONS to avoid an IPA stall if the
+		 * DPL client on the other end is not pulling data from
+		 * IPA HW.
+		 */
+		if ((ep->client == IPA_CLIENT_USB_DPL_CONS) ||
+				(ep->client == IPA_CLIENT_MHI_DPL_CONS))
+			holb_cfg.en = IPA_HOLB_TMR_EN;
+		else
+			holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Enable the pipe */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		if (IPA_CLIENT_IS_CONS(ep->client) &&
+		    (ep->keep_ipa_awake ||
+		    ipa3_ctx->resume_on_connect[ep->client] ||
+		    !ipa3_should_pipe_be_suspended(ep->client))) {
+			memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = false;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		}
+	}
+
+	return res;
+}
+
+int ipa3_disable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int res = 0;
+
+	IPADBG("Disabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/*
+	 * For IPA 4.0 and above, the aggregation frame is closed together
+	 * with the channel STOP.
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the pipe */
+		if (IPA_CLIENT_IS_CONS(ep->client)) {
+			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		}
+
+		udelay(IPA_PKT_FLUSH_TO_US);
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+			&ep_aggr);
+		if (ep_aggr.aggr_en) {
+			res = ipa3_tag_aggr_force_close(clnt_hdl);
+			if (res) {
+				IPAERR("tag process timeout client:%d err:%d\n",
+					clnt_hdl, res);
+				ipa_assert();
+			}
+		}
+	}
+
+	return res;
+}
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	bool undo_aggr_value = false;
+	struct ipahal_reg_clkon_cfg fields;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	/*
+	 * IPAv4.0 HW has a limitation where WSEQ in the MBIM NTH header is
+	 * not reset to 0 when the MBIM pipe is reset. The workaround is to
+	 * disable HW clock gating for the AGGR block via the IPA_CLKON_CFG
+	 * register; the undo flag marks that the bit must be restored once
+	 * the reset has finished.
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (ep->cfg.aggr.aggr == IPA_MBIM_16 &&
+			ep->cfg.aggr.aggr_en != IPA_BYPASS_AGGR) {
+			ipahal_read_reg_fields(IPA_CLKON_CFG, &fields);
+			if (fields.open_aggr_wrapper) {
+				undo_aggr_value = true;
+				fields.open_aggr_wrapper = false;
+				ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+			}
+		}
+	}
+
+	/*
+	 * Reset the channel.
+	 * If reset is called right after stop, we need to wait 1 ms.
+	 */
+	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_chan_fail;
+	}
+
+	/* restore the aggr value if the flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = true;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_chan_fail:
+	/* restore the aggr value if the flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = true;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_reset_gsi_channel);
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Reset event ring */
+	gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting event: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_evt_fail;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_evt_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_reset_gsi_event_ring);
+
+static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
+{
+	return params->client < IPA_CLIENT_MAX;
+}
+
+static void ipa3_start_gsi_debug_monitor(u32 clnt_hdl)
+{
+	struct IpaHwOffloadStatsAllocCmdData_t *gsi_info;
+	struct ipa3_ep_context *ep;
+	enum ipa_client_type client_type;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameters.\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		switch (client_type) {
+		case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[2].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[2].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[3].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[3].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[0].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[1].ch_id = ep->gsi_chan_hdl;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		default:
+			IPADBG("client_type %d not supported\n",
+				client_type);
+		}
+	}
+}
+
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
+	enum ipa_smmu_cb_type cb_type)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+
+	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
+		IPAERR("invalid cb_type\n");
+		return -EINVAL;
+	}
+
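+	/*
+	 * With S1 bypass the peer's physical address is used directly, so
+	 * there is nothing to map.
+	 */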
+	if (ipa3_ctx->s1_bypass_arr[cb_type]) {
+		IPADBG("CB# %d is set to s1 bypass\n", cb_type);
+		return 0;
+	}
+
+	smmu_domain = ipa3_get_smmu_domain_by_type(cb_type);
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
+	if (map) {
+		res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr,
+			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+	} else {
+		res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE);
+		res = (res != PAGE_SIZE);
+	}
+	if (res) {
+		IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap",
+			&phys_addr);
+		return -EINVAL;
+	}
+
+	IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap");
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_smmu_map_peer_reg);
+
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
+	enum ipa_smmu_cb_type cb_type)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+	phys_addr_t phys;
+	unsigned long va;
+	struct scatterlist *sg;
+	int count = 0;
+	size_t len;
+	int i;
+	struct page *page;
+
+	if (!VALID_IPA_SMMU_CB_TYPE(cb_type)) {
+		IPAERR("invalid cb_type\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->s1_bypass_arr[cb_type]) {
+		IPADBG("CB# %d is set to s1 bypass\n", cb_type);
+		return 0;
+	}
+
+	smmu_domain = ipa3_get_smmu_domain_by_type(cb_type);
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The USB GSI driver updates sgt regardless of whether USB S1 is
+	 * enabled or bypassed.
+	 * If USB S1 is enabled via the IOMMU, iova != pa.
+	 * If USB S1 is bypassed, iova == pa.
+	 */
+	if (map) {
+		if (sgt != NULL) {
+			va = rounddown(iova, PAGE_SIZE);
+			for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+				page = sg_page(sg);
+				phys = page_to_phys(page);
+				len = PAGE_ALIGN(sg->offset + sg->length);
+				res = ipa3_iommu_map(smmu_domain, va, phys,
+					len, IOMMU_READ | IOMMU_WRITE);
+				if (res) {
+					IPAERR("Fail to map pa=%pa\n", &phys);
+					return -EINVAL;
+				}
+				va += len;
+				count++;
+			}
+		} else {
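+			/*
+			 * No sg table: map the range 1:1, widened outward to
+			 * page boundaries.
+			 */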
+			res = ipa3_iommu_map(smmu_domain,
+				rounddown(iova, PAGE_SIZE),
+				rounddown(iova, PAGE_SIZE),
+				roundup(size + iova -
+					rounddown(iova, PAGE_SIZE),
+				PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE);
+			if (res) {
+				IPAERR("Fail to map 0x%llx\n", iova);
+				return -EINVAL;
+			}
+		}
+	} else {
+		res = iommu_unmap(smmu_domain,
+		rounddown(iova, PAGE_SIZE),
+		roundup(size + iova - rounddown(iova, PAGE_SIZE),
+		PAGE_SIZE));
+		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE)) {
+			IPAERR("Fail to unmap 0x%llx\n", iova);
+			return -EINVAL;
+		}
+	}
+	IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_smmu_map_peer_buff);
+
+static enum ipa_client_cb_type ipa_get_client_cb_type(
+					enum ipa_client_type client_type)
+{
+	enum ipa_client_cb_type client_cb;
+
+	if (client_type == IPA_CLIENT_USB_PROD ||
+			client_type == IPA_CLIENT_USB_CONS) {
+		IPADBG("USB Client registered\n");
+		client_cb = IPA_USB_CLNT;
+	} else if (client_type == IPA_CLIENT_MHI_PROD ||
+			client_type == IPA_CLIENT_MHI_CONS) {
+		IPADBG("MHI Client registered\n");
+		client_cb = IPA_MHI_CLNT;
+	} else {
+		IPAERR("Invalid IPA client\n");
+		client_cb = IPA_MAX_CLNT;
+	}
+
+	return client_cb;
+}
+void ipa3_register_client_callback(int (*client_cb)(bool is_lock),
+				bool (*teth_port_state)(void),
+				enum ipa_client_type client_type)
+{
+	enum ipa_client_cb_type client;
+
+	IPADBG("entry\n");
+
+	client = ipa_get_client_cb_type(client_type);
+	if (client == IPA_MAX_CLNT)
+		return;
+
+	if (client_cb == NULL) {
+		IPAERR("Bad param\n");
+		return;
+	}
+
+	if (!ipa3_ctx->client_lock_unlock[client])
+		ipa3_ctx->client_lock_unlock[client] = client_cb;
+	if (!ipa3_ctx->get_teth_port_state[client])
+		ipa3_ctx->get_teth_port_state[client] = teth_port_state;
+	IPADBG("exit\n");
+}
+
+void ipa3_deregister_client_callback(enum ipa_client_type client_type)
+{
+	enum ipa_client_cb_type client_cb;
+
+	IPADBG("entry\n");
+
+	client_cb = ipa_get_client_cb_type(client_type);
+	if (client_cb == IPA_MAX_CLNT)
+		return;
+
+	if (ipa3_ctx->client_lock_unlock[client_cb] == NULL &&
+		ipa3_ctx->get_teth_port_state[client_cb] == NULL) {
+		IPAERR("client_lock_unlock is already NULL\n");
+		return;
+	}
+
+	ipa3_ctx->client_lock_unlock[client_cb] = NULL;
+	ipa3_ctx->get_teth_port_state[client_cb] = NULL;
+	IPADBG("exit\n");
+}
+
+static void client_lock_unlock_cb(enum ipa_client_type client, bool is_lock)
+{
+	enum ipa_client_cb_type client_cb;
+
+	IPADBG("entry\n");
+
+	client_cb = ipa_get_client_cb_type(client);
+	if (client_cb == IPA_MAX_CLNT)
+		return;
+
+	if (ipa3_ctx->client_lock_unlock[client_cb])
+		ipa3_ctx->client_lock_unlock[client_cb](is_lock);
+
+	IPADBG("exit\n");
+}
+
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipahal_reg_ep_cfg_status ep_status;
+	unsigned long gsi_dev_hdl;
+	enum gsi_status gsi_res;
+	const struct ipa_gsi_ep_config *gsi_ep_cfg_ptr;
+
+	IPADBG("entry\n");
+	if (params == NULL || out_params == NULL ||
+		!ipa3_is_legal_params(params)) {
+		IPAERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(params->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ep->skip_ep_cfg = params->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = params->client;
+	ep->client_notify = params->notify;
+	ep->priv = params->priv;
+	ep->keep_ipa_awake = params->keep_ipa_awake;
+
+	/* Config QMB for USB_CONS ep */
+	if (!IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Configuring QMB on USB CONS pipe\n");
+		if (ipa_ep_idx >= ipa3_ctx->ipa_num_pipes ||
+			ipa3_ctx->ep[ipa_ep_idx].valid == 0) {
+			IPAERR("bad params.\n");
+			result = -EINVAL;
+			goto ipa_cfg_ep_fail;
+		}
+		result = ipa3_cfg_ep_cfg(ipa_ep_idx, &params->ipa_ep_cfg.cfg);
+		if (result) {
+			IPAERR("fail to configure QMB.\n");
+			goto ipa_cfg_ep_fail;
+		}
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &params->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out_params->clnt_hdl = ipa_ep_idx;
+
+	result = ipa3_enable_data_path(out_params->clnt_hdl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				out_params->clnt_hdl);
+		goto ipa_cfg_ep_fail;
+	}
+
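+	/*
+	 * Allocate the event ring before the channel; the channel
+	 * references it via chan_params.evt_ring_hdl below.
+	 */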
+	gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl;
+	gsi_res = gsi_alloc_evt_ring(&params->evt_ring_params, gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating event ring: %d\n", gsi_res);
+		result = -EFAULT;
+		goto ipa_cfg_ep_fail;
+	}
+
+	gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+		params->evt_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing event ring scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client);
+	if (gsi_ep_cfg_ptr == NULL) {
+		IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n");
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
+	params->chan_params.prefetch_mode = gsi_ep_cfg_ptr->prefetch_mode;
+	params->chan_params.empty_lvl_threshold =
+		gsi_ep_cfg_ptr->prefetch_threshold;
+	gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res,
+			params->chan_params.ch_id);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	memcpy(&ep->chan_scratch, &params->chan_scratch,
+		sizeof(union __packed gsi_channel_scratch));
+
+	/*
+	 * Update scratch for MCS smart prefetch:
+	 * starting with IPA 4.5, smart prefetch is implemented in HW.
+	 * On IPA 4.0/4.1/4.2 we do not use MCS smart prefetch, so keep
+	 * the fields zero.
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		ep->chan_scratch.xdci.max_outstanding_tre =
+		params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+	}
+
+	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		params->chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing channel scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+		&out_params->db_reg_phs_addr_lsb,
+		&out_params->db_reg_phs_addr_msb);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel DB registers addresses: %d\n",
+			gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		params->evt_ring_params.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr =
+		params->evt_ring_params.ring_base_vaddr;
+	ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		params->chan_params.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		params->chan_params.ring_base_vaddr;
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
+	IPADBG("exit\n");
+
+	return 0;
+
+write_chan_scratch_fail:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+write_evt_scratch_fail:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail:
+	return result;
+}
+EXPORT_SYMBOL(ipa3_request_gsi_channel);
+
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size)
+{
+	struct gsi_device_scratch dev_scratch;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
+	dev_scratch.mhi_base_chan_idx_valid = false;
+	dev_scratch.max_usb_pkt_size_valid = true;
+	dev_scratch.max_usb_pkt_size = usb_max_packet_size;
+
+	gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&dev_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing device scratch: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_set_usb_max_packet_size);
+
+/**
+ * ipa3_get_usb_gsi_stats() - Query USB gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad params: NULL usb_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_USB_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/* This function is called as part of USB pipe resume */
+int ipa3_xdci_connect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa3_ep_context *ep;
+
+	IPADBG("entry\n");
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_start_gsi_channel(clnt_hdl);
+	if (result) {
+		IPAERR("failed to start gsi channel clnt_hdl=%u\n", clnt_hdl);
+		goto exit;
+	}
+
+	result = ipa3_enable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt_hdl=%d.\n", result,
+			clnt_hdl);
+		goto stop_ch;
+	}
+
+	IPADBG("exit\n");
+	goto exit;
+
+stop_ch:
+	(void)ipa3_stop_gsi_channel(clnt_hdl);
+exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_xdci_connect);
+
+/* This function is called as part of USB pipe connect */
+int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes  ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		xferrscidx > IPA_XFER_RSC_IDX_MAX) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	if (xferrscidx_valid) {
+		ep->chan_scratch.xdci.xferrscidx = xferrscidx;
+		gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+			ep->chan_scratch);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPAERR("Error writing channel scratch: %d\n", gsi_res);
+			goto write_chan_scratch_fail;
+		}
+	}
+
+	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ep->ep_delay_set = true;
+
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed result=%d\n",
+			clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) success\n", clnt_hdl);
+	} else {
+		ep->ep_delay_set = false;
+	}
+
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto write_chan_scratch_fail;
+	}
+	ipa3_start_gsi_debug_monitor(clnt_hdl);
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+write_chan_scratch_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_xdci_start);
+
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl)
+{
+	enum gsi_status gsi_res;
+
+	memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
+	gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel info: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	if (!gsi_chan_info->evt_valid) {
+		IPAERR("Event info invalid\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static bool ipa3_is_xdci_channel_with_given_info_empty(
+	struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
+{
+	bool is_empty = false;
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		/* For UL channel: chan.RP == chan.WP */
+		is_empty = (chan_info->rp == chan_info->wp);
+	} else {
+		/* For DL channel: */
+		if (chan_info->wp !=
+		    (ep->gsi_mem_info.chan_ring_base_addr +
+		     ep->gsi_mem_info.chan_ring_len -
+		     GSI_CHAN_RE_SIZE_16B)) {
+			/*  if chan.WP != LINK TRB: chan.WP == evt.RP */
+			is_empty = (chan_info->wp == chan_info->evt_rp);
+		} else {
+			/*
+			 * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
+			 * == evt.RP
+			 */
+			is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
+				chan_info->evt_rp);
+		}
+	}
+
+	return is_empty;
+}
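+
+/*
+ * Worked example (hypothetical numbers, for illustration only): with
+ * chan_ring_base_addr = 0x1000, chan_ring_len = 0x100 and 16B ring
+ * elements, the link TRB sits at 0x1000 + 0x100 - 0x10 = 0x10f0. A DL
+ * channel whose chan.WP is 0x1020 is empty iff evt.RP == 0x1020; if
+ * chan.WP is 0x10f0 (the link TRB), it is empty iff evt.RP has
+ * wrapped back to the ring base, i.e. evt.RP == 0x1000.
+ */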
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty)
+{
+	struct gsi_chan_info chan_info;
+	int res;
+
+	if (!ep || !is_empty || !ep->valid) {
+		IPAERR("Input Error\n");
+		return -EFAULT;
+	}
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res) {
+		IPAERR("Failed to get GSI channel info\n");
+		return -EFAULT;
+	}
+
+	*is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
+
+	return 0;
+}
+
+int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
+	u32 source_pipe_bitmask)
+{
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		IPADBG("APQ platform - ignore force clear\n");
+		return 0;
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	req.source_pipe_bitmask = source_pipe_bitmask;
+	if (throttle_source) {
+		req.throttle_source_valid = 1;
+		req.throttle_source = 1;
+	}
+	result = ipa3_qmi_enable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+int ipa3_disable_force_clear(u32 request_id)
+{
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		IPADBG("APQ platform - ignore force clear\n");
+		return 0;
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	result = ipa3_qmi_disable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
+{
+	int res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	res = ipa3_stop_gsi_channel(clnt_hdl);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("xDCI stop channel failed res=%d\n", res);
+		return -EFAULT;
+	}
+
+	if (res)
+		*stop_in_proc = true;
+	else
+		*stop_in_proc = false;
+
+	IPADBG("xDCI channel is %s (result=%d)\n",
+		res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
+
+	IPADBG("exit\n");
+	return 0;
+}
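+
+/*
+ * Editor's summary sketch (illustration, not driver code): the helper
+ * above maps GSI results so that a return of 0 with *stop_in_proc ==
+ * false means the channel reached STOP, 0 with *stop_in_proc == true
+ * means GSI returned -GSI_STATUS_AGAIN or -GSI_STATUS_TIMED_OUT and
+ * the caller should retry (see the brute-force loop below), and any
+ * other GSI error is folded into -EFAULT.
+ */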
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
+	bool *stop_in_proc)
+{
+	unsigned long jiffies_start;
+	unsigned long jiffies_timeout =
+		msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
+	int res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	jiffies_start = jiffies;
+	while (1) {
+		res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
+			stop_in_proc);
+		if (res) {
+			IPAERR("failed to stop xDCI channel hdl=%d\n",
+				clnt_hdl);
+			return res;
+		}
+
+		if (!*stop_in_proc) {
+			IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
+			return res;
+		}
+
+		/*
+		 * Give the previous stop request a chance to complete
+		 * before retrying
+		 */
+		udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
+
+		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPADBG("timeout waiting for xDCI channel emptiness\n");
+			return res;
+		}
+	}
+}
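+
+/*
+ * Retry-budget sketch (editor's illustration, using only the macros
+ * referenced above): the loop retries roughly every
+ * IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC microseconds for up to
+ * IPA_CHANNEL_STOP_IN_PROC_TO_MSEC milliseconds, so the worst-case
+ * attempt count is about:
+ *
+ *	attempts = (IPA_CHANNEL_STOP_IN_PROC_TO_MSEC * 1000)
+ *		/ IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC;
+ */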
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
+		u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl,
+		bool remove_delay)
+{
+	int result;
+	bool is_empty = false;
+	int i;
+	bool stop_in_proc;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	IPADBG("entry\n");
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* first try to stop the channel */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto exit;
+	}
+	if (!stop_in_proc)
+		goto exit;
+
+	/* Remove delay only if the channel stopped successfully */
+	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = false;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+			&ep_cfg_ctrl);
+		if (result) {
+			IPAERR
+			("client (ep: %d) failed to remove delay result=%d\n",
+				clnt_hdl, result);
+		} else {
+			IPADBG("client (ep: %d) delay removed\n",
+				clnt_hdl);
+			ep->ep_delay_set = false;
+		}
+	}
+
+	/* if stop is still in progress, wait for the channel to empty */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto exit;
+		if (is_empty)
+			break;
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* If the channel is empty, try to stop it again */
+	if (is_empty) {
+		result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+		if (result) {
+			IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+				clnt_hdl, ep->client);
+			goto exit;
+		}
+		if (!stop_in_proc)
+			goto exit;
+	}
+	/* if still stop_in_proc or not empty, activate force clear */
+	if (should_force_clear) {
+		result = ipa3_enable_force_clear(qmi_req_id, false,
+			source_pipe_bitmask);
+		if (result) {
+			struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+
+			/*
+			 * Assume modem SSR/shutdown here; the AP can
+			 * remove the delay in this case
+			 */
+			IPAERR(
+				"failed to force clear %d, remove delay from SCND reg\n"
+				, result);
+			ep_ctrl_scnd.endp_delay = false;
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl,
+				&ep_ctrl_scnd);
+		}
+	}
+	/* with force clear, wait for emptiness */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto disable_force_clear_and_exit;
+		if (is_empty)
+			break;
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* try to stop for the last time */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+		&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto disable_force_clear_and_exit;
+	}
+	result = stop_in_proc ? -EFAULT : 0;
+
+disable_force_clear_and_exit:
+	if (should_force_clear)
+		ipa3_disable_force_clear(qmi_req_id);
+exit:
+	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = false;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+			&ep_cfg_ctrl);
+		if (result) {
+			IPAERR
+			("client (ep: %d) failed to remove delay result=%d\n",
+				clnt_hdl, result);
+		} else {
+			IPADBG("client (ep: %d) delay removed\n",
+				clnt_hdl);
+			ep->ep_delay_set = false;
+		}
+	}
+	IPADBG("exit\n");
+	return result;
+}
+
+/*
+ * Set/reset ep_delay for a CLIENT PROD pipe.
+ * Clocks should be voted for before calling this API;
+ * locks should be taken before calling this API.
+ */
+
+int ipa3_set_reset_client_prod_pipe_delay(bool set_reset,
+		enum ipa_client_type client)
+{
+	int result = 0;
+	int pipe_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = set_reset;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		IPAERR("client (%d) not PROD\n", client);
+		return -EINVAL;
+	}
+
+	pipe_idx = ipa3_get_ep_mapping(client);
+
+	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("client (%d) not valid\n", client);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[pipe_idx];
+
+	/* Setting delay on USB_PROD with skip_ep_cfg */
+	client_lock_unlock_cb(client, true);
+	if (ep->valid && ep->skip_ep_cfg) {
+		ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
+		result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed result=%d\n",
+				pipe_idx, result);
+		else
+			IPADBG("client (ep: %d) success\n", pipe_idx);
+	}
+	client_lock_unlock_cb(client, false);
+	return result;
+}
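+
+/*
+ * Illustrative usage (hypothetical caller; IPA_CLIENT_USB_PROD is an
+ * example client, chosen because the comment above mentions USB_PROD
+ * with skip_ep_cfg): delay the producer pipe around SSR handling and
+ * release it afterwards:
+ *
+ *	ipa3_set_reset_client_prod_pipe_delay(true, IPA_CLIENT_USB_PROD);
+ *	...
+ *	ipa3_set_reset_client_prod_pipe_delay(false, IPA_CLIENT_USB_PROD);
+ */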
+
+static bool ipa3_get_teth_port_status(enum ipa_client_type client)
+{
+	enum ipa_client_cb_type client_cb;
+
+	client_cb = ipa_get_client_cb_type(client);
+	if (client_cb == IPA_MAX_CLNT)
+		return false;
+	if (ipa3_ctx->get_teth_port_state[client_cb])
+		return ipa3_ctx->get_teth_port_state[client_cb]();
+	return false;
+}
+
+/*
+ * Start/stop the CLIENT PROD pipes in SSR scenarios
+ */
+
+int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
+		bool start_chnl)
+{
+	int result = 0;
+	int pipe_idx;
+	struct ipa3_ep_context *ep;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		IPAERR("client (%d) not PROD\n", client);
+		return -EINVAL;
+	}
+
+	pipe_idx = ipa3_get_ep_mapping(client);
+
+	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("client (%d) not valid\n", client);
+		return -EINVAL;
+	}
+
+	client_lock_unlock_cb(client, true);
+	ep = &ipa3_ctx->ep[pipe_idx];
+	if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)) {
+		if (start_chnl)
+			result = ipa3_start_gsi_channel(pipe_idx);
+		else
+			result = ipa3_stop_gsi_channel(pipe_idx);
+	}
+	client_lock_unlock_cb(client, false);
+	return result;
+}
+
+int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
+		enum ipa_client_type client)
+{
+	int pipe_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_suspend;
+	struct ipa_ep_cfg_holb ep_holb;
+
+	memset(&ep_suspend, 0, sizeof(ep_suspend));
+	memset(&ep_holb, 0, sizeof(ep_holb));
+
+	ep_suspend.ipa_ep_suspend = set_reset;
+	ep_holb.tmr_val = 0;
+	ep_holb.en = set_reset;
+
+	if (IPA_CLIENT_IS_PROD(client)) {
+		IPAERR("client (%d) not CONS\n", client);
+		return -EINVAL;
+	}
+
+	pipe_idx = ipa3_get_ep_mapping(client);
+
+	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("client (%d) not valid\n", client);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[pipe_idx];
+	/* Setting sus/holb on MHI_CONS with skip_ep_cfg */
+	client_lock_unlock_cb(client, true);
+	if (ep->valid && ep->skip_ep_cfg) {
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+			ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_CTRL_n,
+					pipe_idx, &ep_suspend);
+		/*
+		 * ipa3_cfg_ep_holb is not used here because we are
+		 * setting HOLB on Q6 pipes, and from APPS perspective
+		 * they are not valid, therefore, the above function
+		 * will fail.
+		 */
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+			pipe_idx, &ep_holb);
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			pipe_idx, &ep_holb);
+
+		/* An IPA v4.5 HW issue requires HOLB_EN to be written twice */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+				pipe_idx, &ep_holb);
+	}
+	client_lock_unlock_cb(client, false);
+	return 0;
+}
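+
+/*
+ * Illustrative usage (hypothetical caller; IPA_CLIENT_MHI_CONS is an
+ * example, taken from the MHI_CONS comment in the function body):
+ * park a Q6 consumer pipe during modem SSR and release it on
+ * power-up:
+ *
+ *	ipa3_set_reset_client_cons_pipe_sus_holb(true, IPA_CLIENT_MHI_CONS);
+ *	...
+ *	ipa3_set_reset_client_cons_pipe_sus_holb(false, IPA_CLIENT_MHI_CONS);
+ */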
+
+void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int result;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->ep_delay_set) {
+
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = false;
+
+		if (!ep->keep_ipa_awake)
+			IPA_ACTIVE_CLIENTS_INC_EP
+				(ipa3_get_client_mapping(clnt_hdl));
+
+		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+			&ep_cfg_ctrl);
+
+		if (!ep->keep_ipa_awake)
+			IPA_ACTIVE_CLIENTS_DEC_EP
+				(ipa3_get_client_mapping(clnt_hdl));
+
+		if (result) {
+			IPAERR
+			("client (ep: %d) failed to remove delay result=%d\n",
+				clnt_hdl, result);
+		} else {
+			IPADBG("client (ep: %d) delay removed\n",
+				clnt_hdl);
+			ep->ep_delay_set = false;
+		}
+	}
+}
+EXPORT_SYMBOL(ipa3_xdci_ep_delay_rm);
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
+{
+	struct ipa3_ep_context *ep;
+	int result;
+	u32 source_pipe_bitmask = 0;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, clnt_hdl,
+			true);
+		if (result) {
+			IPAERR("Fail to stop UL channel with data drain\n");
+			WARN_ON(1);
+			goto stop_chan_fail;
+		}
+	} else {
+		IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result) {
+			IPAERR("Error stopping channel (CONS client): %d\n",
+				result);
+			goto stop_chan_fail;
+		}
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+stop_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_xdci_disconnect);
+
+int ipa3_release_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating channel: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating event: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
+		ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+
+	IPADBG("exit\n");
+	return 0;
+
+dealloc_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_release_gsi_channel);
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep = NULL;
+	struct ipa3_ep_context *dl_ep;
+	int result = -EFAULT;
+	u32 source_pipe_bitmask = 0;
+	bool dl_data_pending = true;
+	bool ul_data_pending = true;
+	int i;
+	bool is_empty = false;
+	struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
+	int aggr_active_bitmap = 0;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
+		dl_ep->gsi_chan_hdl);
+	if (result)
+		goto disable_clk_and_exit;
+
+	if (!is_dpl) {
+		result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
+			ul_ep->gsi_chan_hdl);
+		if (result)
+			goto disable_clk_and_exit;
+	}
+
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		if (!dl_data_pending && !ul_data_pending)
+			break;
+		result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+		if (result)
+			goto disable_clk_and_exit;
+		if (!is_empty) {
+			dl_data_pending = true;
+			break;
+		}
+		dl_data_pending = false;
+		if (!is_dpl) {
+			result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
+			if (result)
+				goto disable_clk_and_exit;
+			ul_data_pending = !is_empty;
+		} else {
+			ul_data_pending = false;
+		}
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+
+	if (!dl_data_pending) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
+			IPADBG("DL/DPL data pending due to open aggr. frame\n");
+			dl_data_pending = true;
+		}
+	}
+	if (dl_data_pending) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto disable_clk_and_exit;
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/*
+	 * Check if DL/DPL channel is empty again, data could enter the channel
+	 * before its IPA EP was suspended
+	 */
+	result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+	if (result)
+		goto unsuspend_dl_and_exit;
+	if (!is_empty) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
+	/* Stop DL channel */
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping DL/DPL channel: %d\n", result);
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
+	/* STOP UL channel */
+	if (!is_dpl) {
+		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, ul_clnt_hdl,
+			false);
+		if (result) {
+			IPAERR("Error stopping UL channel: result = %d\n",
+				result);
+			goto start_dl_and_exit;
+		}
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+start_dl_and_exit:
+	gsi_start_channel(dl_ep->gsi_chan_hdl);
+	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);
+unsuspend_dl_and_exit:
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
+disable_clk_and_exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_xdci_suspend);
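+
+/*
+ * Pairing sketch (hypothetical caller, not from this snapshot): a
+ * successful suspend is undone by ipa3_xdci_resume() with the same
+ * handles and is_dpl flag, e.g. for a DPL-only (no UL) setup:
+ *
+ *	if (!ipa3_xdci_suspend(0, dpl_hdl, true, req_id, true)) {
+ *		... bus suspend proceeds ...
+ *		ipa3_xdci_resume(0, dpl_hdl, true);
+ *	}
+ *
+ * The UL handle is ignored when is_dpl is true, per the checks above.
+ */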
+
+int ipa3_start_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	enum ipa_client_type client_type;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes  ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto start_chan_fail;
+	}
+	ipa3_start_gsi_debug_monitor(clnt_hdl);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("exit\n");
+	return 0;
+
+start_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+EXPORT_SYMBOL(ipa3_start_gsi_channel);
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep = NULL;
+	struct ipa3_ep_context *dl_ep = NULL;
+	enum gsi_status gsi_res;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/* Start DL channel */
+	gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS)
+		IPAERR("Error starting DL channel: %d\n", gsi_res);
+	ipa3_start_gsi_debug_monitor(dl_clnt_hdl);
+
+	/* Start UL channel */
+	if (!is_dpl) {
+		gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
+		if (gsi_res != GSI_STATUS_SUCCESS)
+			IPAERR("Error starting UL channel: %d\n", gsi_res);
+		ipa3_start_gsi_debug_monitor(ul_clnt_hdl);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa3_xdci_resume);
+
+/**
+ * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * ep delay on an IPA consumer pipe before disconnect in non-GPI mode. This
+ * API expects the caller to take responsibility for freeing any needed
+ * headers, routing and filtering tables and rules as needed.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ipa3_ctx->tethered_flow_control) {
+		IPADBG("APPS flow control is not enabled\n");
+		/* Send a message to modem to disable flow control honoring. */
+		req.request_id = clnt_hdl;
+		req.source_pipe_bitmask = 1 << clnt_hdl;
+		res = ipa3_qmi_enable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("enable_force_clear_datapath failed %d\n",
+				res);
+		}
+		ep->qmi_request_sent = true;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Set disconnect in progress flag so further flow control events are
+	 * not honored.
+	 */
+	atomic_set(&ep->disconnect_in_progress, 1);
+
+	/* If flow is disabled at this point, restore the ep state. */
+	ep_ctrl.ipa_ep_delay = false;
+	ep_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+	return 0;
+}
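+
+/*
+ * Hypothetical caller sketch (illustration only): per the kerneldoc
+ * above, a peripheral driver in non-GPI mode clears the endpoint
+ * delay before disconnecting:
+ *
+ *	if (ipa3_clear_endpoint_delay(clnt_hdl))
+ *		pr_err("failed to clear ep delay for hdl %u\n", clnt_hdl);
+ */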
+
+/**
+ * ipa3_get_aqc_gsi_stats() - Query AQC gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL aqc_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_AQC_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
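+
+/*
+ * Layout sketch (editor's illustration of the reads above): each AQC
+ * channel occupies an IPA3_UC_DEBUG_STATS_OFF-byte stride in the uC
+ * stats MMIO region, with each 32-bit counter at a fixed offset
+ * inside the stride, so a single field address is:
+ *
+ *	addr = uc_dbg_stats_mmio
+ *		+ ch * IPA3_UC_DEBUG_STATS_OFF
+ *		+ IPA3_UC_DEBUG_STATS_RINGFULL_OFF;
+ */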

+ 3104 - 0
ipa/ipa_v3/ipa_debugfs.c

@@ -0,0 +1,3104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+#include "ipahal/ipahal_nat.h"
+#include "ipa_odl.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MAX_ENTRY_STRING_LEN 500
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_MAX_RULE_IN_TBL 128
+#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
+	* IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+	pr_err(#f "=0x%x\n", status->f)
+
+#define IPA_READ_ONLY_MODE  0444
+#define IPA_READ_WRITE_MODE 0664
+#define IPA_WRITE_ONLY_MODE 0220
+
+struct ipa3_debugfs_file {
+	const char *name;
+	umode_t mode;
+	void *data;
+	const struct file_operations fops;
+};
+
+
+const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
+	__stringify(WLAN_CLIENT_CONNECT),
+	__stringify(WLAN_CLIENT_DISCONNECT),
+	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+	__stringify(WLAN_CLIENT_NORMAL_MODE),
+	__stringify(SW_ROUTING_ENABLE),
+	__stringify(SW_ROUTING_DISABLE),
+	__stringify(WLAN_AP_CONNECT),
+	__stringify(WLAN_AP_DISCONNECT),
+	__stringify(WLAN_STA_CONNECT),
+	__stringify(WLAN_STA_DISCONNECT),
+	__stringify(WLAN_CLIENT_CONNECT_EX),
+	__stringify(WLAN_SWITCH_TO_SCC),
+	__stringify(WLAN_SWITCH_TO_MCC),
+	__stringify(WLAN_WDI_ENABLE),
+	__stringify(WLAN_WDI_DISABLE),
+	__stringify(WAN_UPSTREAM_ROUTE_ADD),
+	__stringify(WAN_UPSTREAM_ROUTE_DEL),
+	__stringify(WAN_EMBMS_CONNECT),
+	__stringify(WAN_XLAT_CONNECT),
+	__stringify(ECM_CONNECT),
+	__stringify(ECM_DISCONNECT),
+	__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+	__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+	__stringify(IPA_QUOTA_REACH),
+	__stringify(IPA_SSR_BEFORE_SHUTDOWN),
+	__stringify(IPA_SSR_AFTER_POWERUP),
+	__stringify(ADD_VLAN_IFACE),
+	__stringify(DEL_VLAN_IFACE),
+	__stringify(ADD_L2TP_VLAN_MAPPING),
+	__stringify(DEL_L2TP_VLAN_MAPPING),
+	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+	__stringify(ADD_BRIDGE_VLAN_MAPPING),
+	__stringify(DEL_BRIDGE_VLAN_MAPPING),
+	__stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN),
+	__stringify(IPA_GSB_CONNECT),
+	__stringify(IPA_GSB_DISCONNECT),
+	__stringify(IPA_COALESCE_ENABLE),
+	__stringify(IPA_COALESCE_DISABLE),
+	__stringify_1(WIGIG_CLIENT_CONNECT),
+	__stringify_1(WIGIG_FST_SWITCH),
+};
+
+const char *ipa3_hdr_l2_type_name[] = {
+	__stringify(IPA_HDR_L2_NONE),
+	__stringify(IPA_HDR_L2_ETHERNET_II),
+	__stringify(IPA_HDR_L2_802_3),
+	__stringify(IPA_HDR_L2_802_1Q),
+};
+
+const char *ipa3_hdr_proc_type_name[] = {
+	__stringify(IPA_HDR_PROC_NONE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+	__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+	__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+	__stringify(IPA_HDR_PROC_L2TP_HEADER_ADD),
+	__stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII_EX),
+};
+
+static struct dentry *dent;
+static char dbg_buff[IPA_MAX_MSG_LEN + 1];
+static char *active_clients_buf;
+
+static s8 ep_reg_idx;
+static void *ipa_ipc_low_buff;
+
+
+static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n"
+			"IPA_QTIME_TIMESTAMP_CFG=0x%x\n"
+			"IPA_TIMERS_PULSE_GRAN_CFG=0x%x\n"
+			"IPA_TIMERS_XO_CLK_DIV_CFG=0x%x\n",
+			ipahal_read_reg(IPA_VERSION),
+			ipahal_read_reg(IPA_COMP_HW_VERSION),
+			ipahal_read_reg(IPA_ROUTE),
+			smem_sz.shared_mem_baddr,
+			smem_sz.shared_mem_sz,
+			ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG),
+			ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG),
+			ipahal_read_reg(IPA_TIMERS_XO_CLK_DIV_CFG));
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_write_ep_holb(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ep_cfg_holb holb;
+	u32 en;
+	u32 tmr_val;
+	u32 ep_idx;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &ep_idx))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &en))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &tmr_val))
+		return -EINVAL;
+
+	holb.en = en;
+	holb.tmr_val = tmr_val;
+
+	ipa3_cfg_ep_holb(ep_idx, &holb);
+
+	return count;
+}
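+
+/*
+ * Usage sketch (the debugfs node name and mount point are
+ * assumptions; they depend on how the files are registered later in
+ * this file): the handler above parses "<ep_idx> <en> <tmr_val>" as
+ * three space-separated integers, so from a shell:
+ *
+ *	echo "5 1 2000" > /sys/kernel/debug/ipa/holb
+ *
+ * would enable HOLB on pipe 5 with a timer value of 2000.
+ */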
+
+static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	s8 option;
+	int ret;
+
+	ret = kstrtos8_from_user(buf, count, 0, &option);
+	if (ret)
+		return ret;
+
+	if (option >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("bad pipe specified %u\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+/**
+ * _ipa_read_ep_reg_v3_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed
+ */
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
+/**
+ * _ipa_read_ep_reg_v4_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed.
+ * Relative to v3_0, IPA_ENDP_INIT_ROUTE_n has been removed.
+ */
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_CONN_TRACK_n%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CONN_TRACK_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
+static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
+				IPA_MAX_MSG_LEN, i);
+
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0) {
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return ret;
+		}
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	*ppos = pos + size;
+	return size;
+}
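+
+/*
+ * Usage sketch (node path is an assumption): ep_reg is
+ * select-then-read; writing a pipe index restricts the dump to that
+ * pipe, while a negative index means "all pipes" per the comment in
+ * the read handler:
+ *
+ *	echo 3 > /sys/kernel/debug/ipa/ep_reg
+ *	cat /sys/kernel/debug/ipa/ep_reg
+ */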
+
+static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	s8 option = 0;
+	int ret;
+	uint32_t bw_mbps = 0;
+
+	ret = kstrtos8_from_user(buf, count, 0, &option);
+	if (ret)
+		return ret;
+
+	switch (option) {
+	case 0:
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		bw_mbps = 0;
+		break;
+	case 1:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 0;
+		break;
+	case 2:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 700;
+		break;
+	case 3:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 3000;
+		break;
+	case 4:
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		bw_mbps = 7000;
+		break;
+	default:
+		pr_err("Not support this vote (%d)\n", option);
+		return -EFAULT;
+	}
+	if (ipa3_vote_for_bus_bw(&bw_mbps)) {
+		IPAERR("Failed to vote for bus BW (%u)\n", bw_mbps);
+		return -EFAULT;
+	}
+
+	return count;
+}
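+
+/*
+ * Option map for the write handler above (editor's summary; the node
+ * path is an assumption): 0 drops the active-clients vote, while 1..4
+ * take a vote with a bus BW request of 0, 700, 3000 or 7000 Mbps
+ * respectively:
+ *
+ *	echo 2 > /sys/kernel/debug/ipa/keep_awake
+ */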
+
+static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt))
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is ON\n");
+	else
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is OFF\n");
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int i = 0;
+	struct ipa3_hdr_entry *entry;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_tbl_lcl)
+		pr_err("Table resides on local memory\n");
+	else
+		pr_err("Table resides on system (ddr) memory\n");
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (entry->cookie != IPA_HDR_COOKIE)
+			continue;
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"name:%s len=%d ref=%d partial=%d type=%s ",
+			entry->name,
+			entry->hdr_len,
+			entry->ref_cnt,
+			entry->is_partial,
+			ipa3_hdr_l2_type_name[entry->type]);
+
+		if (entry->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"phys_base=0x%pa ",
+				&entry->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ofst=%u ",
+				entry->offset_entry->offset >> 2);
+		}
+		for (i = 0; i < entry->hdr_len; i++) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"%02x", entry->hdr[i]);
+		}
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
+		enum ipa_ip_type ip, int nbytes)
+{
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"is_pure_ack ");
+
+	if (attrib->attrib_mask & IPA_FLT_TOS)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tos:%d ", attrib->u.v4.tos);
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tos_value:%d ", attrib->tos_value);
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tos_mask:%d ", attrib->tos_mask);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"protocol:%d ", attrib->u.v4.protocol);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"src_addr:%pI4 src_addr_mask:%pI4 ",
+				addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"src_addr:%pI6 src_addr_mask:%pI6 ",
+				addr + 0, mask + 0);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"dst_addr:%pI4 dst_addr_mask:%pI4 ",
+				addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"dst_addr:%pI6 dst_addr_mask:%pI6 ",
+				addr + 0, mask + 0);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"src_port_range:%u %u ",
+			attrib->src_port_lo,
+			attrib->src_port_hi);
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"dst_port_range:%u %u ",
+			attrib->dst_port_lo,
+			attrib->dst_port_hi);
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"type:%d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_CODE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"code:%d ", attrib->code);
+
+	if (attrib->attrib_mask & IPA_FLT_SPI)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"spi:%x ", attrib->spi);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"src_port:%u ", attrib->src_port);
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"dst_port:%u ", attrib->dst_port);
+
+	if (attrib->attrib_mask & IPA_FLT_TC)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tc:%d ", attrib->u.v6.tc);
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"flow_label:%x ",
+			attrib->u.v6.flow_label);
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"next_hdr:%d ",
+			attrib->u.v6.next_hdr);
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"metadata:%x metadata_mask:%x ",
+			attrib->meta_data,
+			attrib->meta_data_mask);
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"frg ");
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q))
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"src_mac_addr:%pM ",
+			attrib->src_mac_addr);
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q))
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"dst_mac_addr:%pM ",
+			attrib->dst_mac_addr);
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"ether_type:%x ",
+			attrib->ether_type);
+
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"vlan_id:%x ",
+			attrib->vlan_id);
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tcp syn ");
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tcp syn l2tp ");
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"l2tp inner ip type: %d ",
+			attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		addr[0] = htonl(attrib->u.v4.dst_addr);
+		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"dst_addr:%pI4 dst_addr_mask:%pI4 ",
+			addr, mask);
+	}
+
+	nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	return 0;
+}
+
+static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib,
+	int nbytes)
+{
+	uint8_t addr[16];
+	uint8_t mask[16];
+	int i;
+	int j;
+
+	if (attrib->tos_eq_present) {
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"pure_ack ");
+		else
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"tos:%d ",
+				attrib->tos_eq);
+	}
+
+	if (attrib->protocol_eq_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"protocol:%d ",
+			attrib->protocol_eq);
+
+	if (attrib->tc_eq_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"tc:%d ", attrib->tc_eq);
+
+	if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+		IPAERR_RL("num_offset_meq_128  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+		return -EPERM;
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_128; i++) {
+		for (j = 0; j < 16; j++) {
+			addr[j] = attrib->offset_meq_128[i].value[j];
+			mask[j] = attrib->offset_meq_128[i].mask[j];
+		}
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+			attrib->offset_meq_128[i].offset,
+			mask, addr);
+	}
+
+	if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+		IPAERR_RL("num_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+		return -EPERM;
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_32; i++)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+			attrib->offset_meq_32[i].offset,
+			attrib->offset_meq_32[i].mask,
+			attrib->offset_meq_32[i].value);
+
+	if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+		IPAERR_RL("num_ihl_offset_meq_32  Max %d passed value %d\n",
+		IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+		return -EPERM;
+	}
+
+	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+			attrib->ihl_offset_meq_32[i].offset,
+			attrib->ihl_offset_meq_32[i].mask,
+			attrib->ihl_offset_meq_32[i].value);
+
+	if (attrib->metadata_meq32_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(metadata: ofst:%u mask:0x%x val:0x%x) ",
+			attrib->metadata_meq32.offset,
+			attrib->metadata_meq32.mask,
+			attrib->metadata_meq32.value);
+
+	if (attrib->num_ihl_offset_range_16 >
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+		IPAERR_RL("num_ihl_offset_range_16  Max %d passed value %d\n",
+			IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+			attrib->num_ihl_offset_range_16);
+		return -EPERM;
+	}
+
+	for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+			attrib->ihl_offset_range_16[i].offset,
+			attrib->ihl_offset_range_16[i].range_low,
+			attrib->ihl_offset_range_16[i].range_high);
+
+	if (attrib->ihl_offset_eq_32_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ihl_ofst_eq32:%d val:0x%x) ",
+			attrib->ihl_offset_eq_32.offset,
+			attrib->ihl_offset_eq_32.value);
+
+	if (attrib->ihl_offset_eq_16_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"(ihl_ofst_eq16:%d val:0x%x) ",
+			attrib->ihl_offset_eq_16.offset,
+			attrib->ihl_offset_eq_16.value);
+
+	if (attrib->fl_eq_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"flow_label:%d ", attrib->fl_eq);
+
+	if (attrib->ipv4_frag_eq_present)
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"frag ");
+
+	nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	return 0;
+}
+
+static int ipa3_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i = 0;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 ofst;
+	u32 ofst_words;
+	int nbytes = 0;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ip == IPA_IP_v6) {
+		if (ipa3_ctx->ip6_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
+
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->proc_ctx) {
+				ofst = entry->proc_ctx->offset_entry->offset;
+				ofst_words =
+					(ofst +
+					ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+					>> 5;
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_proc_ctx_tbl_lcl);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"proc_ctx[32B]:%u attrib_mask:%08x ",
+					ofst_words,
+					entry->rule.attrib.attrib_mask);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"enable_stats:%u counter_id:%u\n",
+					entry->rule.enable_stats,
+					entry->rule.cnt_idx);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			} else {
+				if (entry->hdr)
+					ofst = entry->hdr->offset_entry->offset;
+				else
+					ofst = 0;
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_tbl_lcl);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"hdr_ofst[words]:%u attrib_mask:%08x ",
+					ofst >> 2,
+					entry->rule.attrib.attrib_mask);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"enable_stats:%u counter_id:%u\n",
+					entry->rule.enable_stats,
+					entry->rule.cnt_idx);
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			}
+
+			ipa3_attrib_dump(&entry->rule.attrib, ip, nbytes);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	int tbls_num;
+	int rules_num;
+	int tbl;
+	int rl;
+	int res = 0;
+	struct ipahal_rt_rule_entry *rules = NULL;
+	int nbytes = 0;
+
+	switch (ip) {
+	case IPA_IP_v4:
+		tbls_num = IPA_MEM_PART(v4_rt_num_index);
+		break;
+	case IPA_IP_v6:
+		tbls_num = IPA_MEM_PART(v6_rt_num_index);
+		break;
+	default:
+		IPAERR("ip type error %d\n", ip);
+		return -EINVAL;
+	}
+
+	IPADBG("Tring to parse %d H/W routing tables - IP=%d\n", tbls_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules) {
+		IPAERR("failed to allocate mem for tbl rules\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
+
+	for (tbl = 0 ; tbl < tbls_num ; tbl++) {
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"=== Routing Table %d = Hashable Rules ===\n",
+			tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules,
+			&rules_num);
+		if (res) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id:%u cnt_id:%hhu prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority, rules[rl].retain_hdr);
+			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib, nbytes);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
+		}
+
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"=== Routing Table %d = Non-Hashable Rules ===\n",
+			tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules,
+			&rules_num);
+		if (res) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx,
+				rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id:%u cnt_id:%hhu prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority, rules[rl].retain_hdr);
+			res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib, nbytes);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
+		}
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	kfree(rules);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa3_hdr_proc_ctx_tbl *tbl;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	u32 ofst_words;
+
+	tbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl)
+		pr_info("Table resides on local memory\n");
+	else
+		pr_info("Table resides on system(ddr) memory\n");
+
+	list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+		ofst_words = (entry->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+			>> 5;
+		if (entry->hdr->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr_phys_base:0x%pa\n",
+				&entry->hdr->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr[words]:%u\n",
+				entry->hdr->offset_entry->offset >> 2);
+		}
+
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
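+/**
+ * ipa3_read_flt() - debugfs read handler that dumps the S/W filtering tables
+ *
+ * Walks every pipe that supports filtering and prints each committed rule,
+ * resolving the routing table index either from the equation attributes
+ * (when eq_attrib_type is set) or from the routing table handle.
+ */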
+static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i;
+	int j;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	bool eq;
+	int res = 0;
+	int nbytes = 0;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
+		if (!ipa_is_ep_support_flt(j))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->cookie != IPA_FLT_COOKIE)
+				continue;
+			if (entry->rule.eq_attrib_type) {
+				rt_tbl_idx = entry->rule.rt_tbl_idx;
+				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+				eq = true;
+			} else {
+				rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
+				if (rt_tbl == NULL ||
+					rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+					rt_tbl_idx = ~0;
+				else
+					rt_tbl_idx = rt_tbl->idx;
+				bitmap = entry->rule.attrib.attrib_mask;
+				eq = false;
+			}
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				j, i, entry->rule.action, rt_tbl_idx);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"attrib_mask:%08x retain_hdr:%d eq:%d ",
+				bitmap, entry->rule.retain_hdr, eq);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hashable:%u rule_id:%u max_prio:%u prio:%u ",
+				entry->rule.hashable, entry->rule_id,
+				entry->rule.max_prio, entry->prio);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"enable_stats:%u counter_id:%u\n",
+				entry->rule.enable_stats,
+				entry->rule.cnt_idx);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"pdn index %d, set metadata %d ",
+					entry->rule.pdn_idx,
+					entry->rule.set_metadata);
+			if (eq) {
+				res = ipa3_attrib_dump_eq(
+						&entry->rule.eq_attrib, nbytes);
+				if (res) {
+					IPAERR_RL("failed read attrib eq\n");
+					goto bail;
+				}
+			} else
+				ipa3_attrib_dump(
+					&entry->rule.attrib, ip, nbytes);
+			i++;
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
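+/**
+ * ipa3_read_flt_hw() - debugfs read handler that dumps the H/W filtering
+ * tables
+ *
+ * For every pipe that supports filtering, reads back first the hashable
+ * and then the non-hashable rules directly from hardware and prints them
+ * in equation form.
+ */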
+static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int pipe;
+	int rl;
+	int rules_num;
+	struct ipahal_flt_rule_entry *rules;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	int res = 0;
+	int nbytes = 0;
+
+	IPADBG("Trying to parse %d H/W filtering tables - IP=%d\n",
+		ipa3_ctx->ep_flt_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules)
+		return -ENOMEM;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
+	for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
+		if (!ipa_is_ep_support_flt(pipe))
+			continue;
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"=== Filtering Table ep:%d = Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules,
+			&rules_num);
+		if (res) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"-->No rules. Empty tbl or modem sys table\n");
+
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id:%u cnt_id:%hhu prio:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"pdn: %u, set_metadata: %u ",
+					rules[rl].rule.pdn_idx,
+					rules[rl].rule.set_metadata);
+			res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib,
+				nbytes);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
+		}
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules,
+			&rules_num);
+		if (res) {
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"-->No rules. Empty tbl or modem sys table\n");
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id:%u cnt_id:%hhu prio:%u\n",
+				rules[rl].id, rules[rl].cnt_idx,
+				rules[rl].priority);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				nbytes += scnprintf(
+					dbg_buff + nbytes,
+					IPA_MAX_MSG_LEN - nbytes,
+					"pdn: %u, set_metadata: %u ",
+					rules[rl].rule.pdn_idx,
+					rules[rl].rule.set_metadata);
+			res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib,
+				nbytes);
+			if (res) {
+				IPAERR_RL("failed read attrib eq\n");
+				goto bail;
+			}
+		}
+		nbytes += scnprintf(
+			dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(rules);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
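+/**
+ * ipa3_read_stats() - debugfs read handler for the general S/W counters
+ *
+ * Prints the data-path counters from ipa3_ctx->stats, the active-clients
+ * count, a bitmap of connected endpoints and the per-exception LAN RX
+ * counters.
+ */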
+static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int cnt = 0;
+	uint connect = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++)
+		connect |= (ipa3_ctx->ep[i].valid << i);
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"sw_tx=%u\n"
+		"hw_tx=%u\n"
+		"tx_non_linear=%u\n"
+		"tx_compl=%u\n"
+		"wan_rx=%u\n"
+		"stat_compl=%u\n"
+		"lan_aggr_close=%u\n"
+		"wan_aggr_close=%u\n"
+		"act_clnt=%u\n"
+		"con_clnt_bmap=0x%x\n"
+		"wan_rx_empty=%u\n"
+		"wan_repl_rx_empty=%u\n"
+		"lan_rx_empty=%u\n"
+		"lan_repl_rx_empty=%u\n"
+		"flow_enable=%u\n"
+		"flow_disable=%u\n",
+		ipa3_ctx->stats.tx_sw_pkts,
+		ipa3_ctx->stats.tx_hw_pkts,
+		ipa3_ctx->stats.tx_non_linear,
+		ipa3_ctx->stats.tx_pkts_compl,
+		ipa3_ctx->stats.rx_pkts,
+		ipa3_ctx->stats.stat_compl,
+		ipa3_ctx->stats.aggr_close,
+		ipa3_ctx->stats.wan_aggr_close,
+		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt),
+		connect,
+		ipa3_ctx->stats.wan_rx_empty,
+		ipa3_ctx->stats.wan_repl_rx_empty,
+		ipa3_ctx->stats.lan_rx_empty,
+		ipa3_ctx->stats.lan_repl_rx_empty,
+		ipa3_ctx->stats.flow_enable,
+		ipa3_ctx->stats.flow_disable);
+	cnt += nbytes;
+
+	for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {
+		nbytes = scnprintf(dbg_buff + cnt,
+			IPA_MAX_MSG_LEN - cnt,
+			"lan_rx_excp[%u:%20s]=%u\n", i,
+			ipahal_pkt_status_exception_str(i),
+			ipa3_ctx->stats.rx_excp_pkts[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_odlstats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"ODL received pkt=%u\n"
+			"ODL processed pkt to DIAG=%u\n"
+			"ODL dropped pkt=%u\n"
+			"ODL packets in queue=%u\n",
+			ipa3_odl_ctx->stats.odl_rx_pkt,
+			ipa3_odl_ctx->stats.odl_tx_diag_pkt,
+			ipa3_odl_ctx->stats.odl_drop_pkt,
+			atomic_read(&ipa3_odl_ctx->stats.numer_in_queue));
+
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_page_recycle_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"COAL : Total number of packets replenished =%llu\n"
+			"COAL : Number of tmp alloc packets  =%llu\n"
+			"DEF  : Total number of packets replenished =%llu\n"
+			"DEF  : Number of tmp alloc packets  =%llu\n",
+			ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
+			ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
+			ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
+			ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc);
+
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
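+
+/**
+ * ipa3_read_wstats() - debugfs read handler for the WLAN client stats
+ *
+ * Prints the per-endpoint counters of IPA_CLIENT_WLAN1_PROD and of every
+ * WLAN consumer pipe (WLAN1_CONS..WLAN4_CONS), followed by the shared
+ * WLAN comm-buffer counters.
+ */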
+static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+	int cnt = 0;
+	int nbytes;
+	int ipa_ep_idx;
+	enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+	struct ipa3_ep_context *ep;
+
+	do {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+		cnt += nbytes;
+
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Avail Fifo Desc:",
+			atomic_read(&ep->avail_fifo_desc));
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Status Rcvd:",
+			ep->wstats.rx_pkts_status_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Processed:",
+			ep->wstats.rx_hd_processed);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+		cnt += nbytes;
+
+	} while (0);
+
+	client = IPA_CLIENT_WLAN1_CONS;
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+		"Client IPA_CLIENT_WLAN1_CONS Stats:");
+	cnt += nbytes;
+	while (1) {
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Tx Pkts Dropped:",
+			ep->wstats.tx_pkts_dropped);
+		cnt += nbytes;
+
+nxt_clnt_cons:
+		switch (client) {
+		case IPA_CLIENT_WLAN1_CONS:
+			client = IPA_CLIENT_WLAN2_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN2_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN2_CONS:
+			client = IPA_CLIENT_WLAN3_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN3_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN3_CONS:
+			client = IPA_CLIENT_WLAN4_CONS;
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+				"Client IPA_CLIENT_WLAN4_CONS Stats:");
+			cnt += nbytes;
+			continue;
+		case IPA_CLIENT_WLAN4_CONS:
+		default:
+			break;
+		}
+		break;
+	}
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+		"\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Allocated:",
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+		"Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed);
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
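+/**
+ * ipa3_read_ntn() - debugfs read handler for the uC NTN offload stats
+ *
+ * Queries the uC shared-memory stats via ipa3_get_ntn_stats() and prints
+ * the ring and GSI counters of TX channel 0 and RX channel 0.
+ */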
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	struct Ipa3HwStatsNTNInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_ntn_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX ipa_pipe_number=%u\n",
+			TX_STATS(num_pkts_processed),
+			TX_STATS(ring_stats.ringFull),
+			TX_STATS(ring_stats.ringEmpty),
+			TX_STATS(ring_stats.ringUsageHigh),
+			TX_STATS(ring_stats.ringUsageLow),
+			TX_STATS(ring_stats.RingUtilCount),
+			TX_STATS(gsi_stats.bamFifoFull),
+			TX_STATS(gsi_stats.bamFifoEmpty),
+			TX_STATS(gsi_stats.bamFifoUsageHigh),
+			TX_STATS(gsi_stats.bamFifoUsageLow),
+			TX_STATS(gsi_stats.bamUtilCount),
+			TX_STATS(num_db),
+			TX_STATS(num_qmb_int_handled),
+			TX_STATS(ipa_pipe_number));
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX num_pkts_processed=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_db=%u\n"
+			"RX num_qmb_int_handled=%u\n"
+			"RX ipa_pipe_number=%u\n",
+			RX_STATS(num_pkts_processed),
+			RX_STATS(ring_stats.ringFull),
+			RX_STATS(ring_stats.ringEmpty),
+			RX_STATS(ring_stats.ringUsageHigh),
+			RX_STATS(ring_stats.ringUsageLow),
+			RX_STATS(ring_stats.RingUtilCount),
+			RX_STATS(gsi_stats.bamFifoFull),
+			RX_STATS(gsi_stats.bamFifoEmpty),
+			RX_STATS(gsi_stats.bamFifoUsageHigh),
+			RX_STATS(gsi_stats.bamFifoUsageLow),
+			RX_STATS(gsi_stats.bamUtilCount),
+			RX_STATS(num_db),
+			RX_STATS(num_qmb_int_handled),
+			RX_STATS(ipa_pipe_number));
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Failed to read NTN stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct IpaHwStatsWDIInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+	struct IpaHwStatsWDITxInfoData_t *tx_ch_ptr;
+
+	if (!ipa3_get_wdi_stats(&stats)) {
+		tx_ch_ptr = &stats.tx_ch_stats;
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX copy_engine_doorbell_value=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n",
+			tx_ch_ptr->num_pkts_processed,
+			tx_ch_ptr->copy_engine_doorbell_value,
+			tx_ch_ptr->num_db_fired,
+			tx_ch_ptr->tx_comp_ring_stats.ringFull,
+			tx_ch_ptr->tx_comp_ring_stats.ringEmpty,
+			tx_ch_ptr->tx_comp_ring_stats.ringUsageHigh,
+			tx_ch_ptr->tx_comp_ring_stats.ringUsageLow,
+			tx_ch_ptr->tx_comp_ring_stats.RingUtilCount,
+			tx_ch_ptr->bam_stats.bamFifoFull,
+			tx_ch_ptr->bam_stats.bamFifoEmpty,
+			tx_ch_ptr->bam_stats.bamFifoUsageHigh,
+			tx_ch_ptr->bam_stats.bamFifoUsageLow,
+			tx_ch_ptr->bam_stats.bamUtilCount,
+			tx_ch_ptr->num_db,
+			tx_ch_ptr->num_unexpected_db,
+			tx_ch_ptr->num_bam_int_handled,
+			tx_ch_ptr->num_bam_int_in_non_running_state,
+			tx_ch_ptr->num_qmb_int_handled,
+			tx_ch_ptr->num_bam_int_handled_while_wait_for_bam);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"RX num_ic_inj_vdev_change=%u\n"
+			"RX num_ic_inj_fw_desc_change=%u\n"
+			"RX num_qmb_int_handled=%u\n"
+			"RX reserved1=%u\n"
+			"RX reserved2=%u\n",
+			stats.rx_ch_stats.max_outstanding_pkts,
+			stats.rx_ch_stats.num_pkts_processed,
+			stats.rx_ch_stats.rx_ring_rp_value,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+			stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+			stats.rx_ch_stats.bam_stats.bamFifoFull,
+			stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.rx_ch_stats.bam_stats.bamUtilCount,
+			stats.rx_ch_stats.num_bam_int_handled,
+			stats.rx_ch_stats.num_db,
+			stats.rx_ch_stats.num_unexpected_db,
+			stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+			stats.rx_ch_stats.num_ic_inj_vdev_change,
+			stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+			stats.rx_ch_stats.num_qmb_int_handled,
+			stats.rx_ch_stats.reserved1,
+			stats.rx_ch_stats.reserved2);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Failed to read WDI stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	u32 option = 0;
+	struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
+	int ret;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("IPA_DEBUG_CNT_CTRL is not supported in IPA 4.0 and above\n");
+		return -EPERM;
+	}
+
+	ret = kstrtou32_from_user(buf, count, 0, &option);
+	if (ret)
+		return ret;
+
+	memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
+	dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
+	dbg_cnt_ctrl.product = true;
+	dbg_cnt_ctrl.src_pipe = 0xff;
+	dbg_cnt_ctrl.rule_idx_pipe_rule = false;
+	dbg_cnt_ctrl.rule_idx = 0;
+	if (option == 1)
+		dbg_cnt_ctrl.en = true;
+	else
+		dbg_cnt_ctrl.en = false;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return count;
+}
+
+static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u32 regval;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("IPA_DEBUG_CNT_REG is not supported in IPA 4.0 and above\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	regval =
+		ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ipa3_event_name); i++) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"msg[%u:%27s] W:%u R:%u\n", i,
+				ipa3_event_name[i],
+				ipa3_ctx->stats.msg_w[i],
+				ipa3_ctx->stats.msg_r[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
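+/**
+ * ipa3_read_table() - dump one NAT/IPv6CT table to the kernel log
+ * @table_addr: base address of the table
+ * @table_size: number of entries in the table
+ * @total_num_entries: in/out accumulator of valid entries found
+ * @rule_id: in/out running entry index, carried across base and
+ *  expansion tables
+ * @nat_type: which entry layout to use when parsing
+ *
+ * Skips zeroed entries and stringifies the rest, flagging entries that
+ * fail the validity check.
+ */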
+static void ipa3_read_table(
+	char *table_addr,
+	u32 table_size,
+	u32 *total_num_entries,
+	u32 *rule_id,
+	enum ipahal_nat_type nat_type)
+{
+	int result;
+	char *entry;
+	size_t entry_size;
+	bool entry_zeroed;
+	bool entry_valid;
+	u32 i, num_entries = 0, id = *rule_id;
+	char *buff;
+	size_t buff_size = 2 * IPA_MAX_ENTRY_STRING_LEN;
+
+	IPADBG("In\n");
+
+	if (table_addr == NULL) {
+		pr_err("NULL NAT table\n");
+		goto bail;
+	}
+
+	result = ipahal_nat_entry_size(nat_type, &entry_size);
+
+	if (result) {
+		IPAERR("Failed to retrieve size of %s entry\n",
+			ipahal_nat_type_str(nat_type));
+		goto bail;
+	}
+
+	buff = kzalloc(buff_size, GFP_KERNEL);
+
+	if (!buff) {
+		IPAERR("Out of memory\n");
+		goto bail;
+	}
+
+	for (i = 0, entry = table_addr;
+		i < table_size;
+		++i, ++id, entry += entry_size) {
+
+		result = ipahal_nat_is_entry_zeroed(nat_type, entry,
+			&entry_zeroed);
+
+		if (result) {
+			IPAERR("Cannot determine if %s entry is zeroed\n",
+				   ipahal_nat_type_str(nat_type));
+			goto free_buf;
+		}
+
+		if (entry_zeroed)
+			continue;
+
+		result = ipahal_nat_is_entry_valid(nat_type, entry,
+			&entry_valid);
+
+		if (result) {
+			IPAERR("Cannot determine if %s entry is valid\n",
+				   ipahal_nat_type_str(nat_type));
+			goto free_buf;
+		}
+
+		if (entry_valid) {
+			++num_entries;
+			pr_err("\tEntry_Index=%d\n", id);
+		} else
+			pr_err("\tEntry_Index=%d - Invalid Entry\n", id);
+
+		ipahal_nat_stringify_entry(nat_type, entry,
+			buff, buff_size);
+
+		pr_err("%s\n", buff);
+
+		memset(buff, 0, buff_size);
+	}
+
+	if (num_entries)
+		pr_err("\n");
+	else
+		pr_err("\tEmpty\n\n");
+
+free_buf:
+	kfree(buff);
+	*rule_id = id;
+	*total_num_entries += num_entries;
+
+bail:
+	IPADBG("Out\n");
+}
+
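+/**
+ * ipa3_start_read_memory_device() - dump the base and expansion tables of
+ * a NAT or IPv6CT memory device
+ *
+ * For IPv6CT the tables always live in DDR; for NAT the active table may
+ * be the DDR or the SRAM copy, and only the active, in-use one is dumped.
+ */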
+static void ipa3_start_read_memory_device(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	enum ipahal_nat_type nat_type,
+	u32 *num_ddr_ent_ptr,
+	u32 *num_sram_ent_ptr)
+{
+	u32 rule_id = 0;
+
+	if (dev->is_ipv6ct_mem) {
+
+		IPADBG("In: v6\n");
+
+		pr_err("%s_Table_Size=%d\n",
+			   dev->name, dev->table_entries + 1);
+
+		pr_err("%s_Expansion_Table_Size=%d\n",
+			   dev->name, dev->expn_table_entries);
+
+		pr_err("\n%s Base Table:\n", dev->name);
+
+		if (dev->base_table_addr)
+			ipa3_read_table(
+				dev->base_table_addr,
+				dev->table_entries + 1,
+				num_ddr_ent_ptr,
+				&rule_id,
+				nat_type);
+
+		pr_err("%s Expansion Table:\n", dev->name);
+
+		if (dev->expansion_table_addr)
+			ipa3_read_table(
+				dev->expansion_table_addr,
+				dev->expn_table_entries,
+				num_ddr_ent_ptr,
+				&rule_id,
+				nat_type);
+	}
+
+	if (dev->is_nat_mem) {
+		struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+		struct ipa3_nat_mem_loc_data *mld_ptr = NULL;
+		u32 *num_ent_ptr;
+		const char *type_ptr;
+
+		IPADBG("In: v4\n");
+
+		if (nm_ptr->active_table == IPA_NAT_MEM_IN_DDR &&
+			nm_ptr->ddr_in_use) {
+
+			mld_ptr     = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];
+			num_ent_ptr = num_ddr_ent_ptr;
+			type_ptr    = "DDR based table";
+		}
+
+		if (nm_ptr->active_table == IPA_NAT_MEM_IN_SRAM &&
+			nm_ptr->sram_in_use) {
+
+			mld_ptr     = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];
+			num_ent_ptr = num_sram_ent_ptr;
+			type_ptr    = "SRAM based table";
+		}
+
+		if (mld_ptr) {
+			pr_err("(%s) %s_Table_Size=%d\n",
+				   type_ptr,
+				   dev->name,
+				   mld_ptr->table_entries + 1);
+
+			pr_err("(%s) %s_Expansion_Table_Size=%d\n",
+				   type_ptr,
+				   dev->name,
+				   mld_ptr->expn_table_entries);
+
+			pr_err("\n(%s) %s_Base Table:\n",
+				   type_ptr,
+				   dev->name);
+
+			if (mld_ptr->base_table_addr)
+				ipa3_read_table(
+					mld_ptr->base_table_addr,
+					mld_ptr->table_entries + 1,
+					num_ent_ptr,
+					&rule_id,
+					nat_type);
+
+			pr_err("(%s) %s_Expansion Table:\n",
+				   type_ptr,
+				   dev->name);
+
+			if (mld_ptr->expansion_table_addr)
+				ipa3_read_table(
+					mld_ptr->expansion_table_addr,
+					mld_ptr->expn_table_entries,
+					num_ent_ptr,
+					&rule_id,
+					nat_type);
+		}
+	}
+
+	IPADBG("Out\n");
+}
+
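+/**
+ * ipa3_finish_read_memory_device() - print the per-device entry totals
+ *
+ * For NAT devices also prints how many times the driver switched the
+ * active table between DDR and SRAM.
+ */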
+static void ipa3_finish_read_memory_device(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	u32 num_ddr_entries,
+	u32 num_sram_entries)
+{
+	IPADBG("In\n");
+
+	if (dev->is_ipv6ct_mem) {
+		pr_err("Overall number %s entries: %u\n\n",
+			   dev->name,
+			   num_ddr_entries);
+	} else {
+		struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+
+		if (num_ddr_entries)
+			pr_err("%s: Overall number of DDR entries: %u\n\n",
+				   dev->name,
+				   num_ddr_entries);
+
+		if (num_sram_entries)
+			pr_err("%s: Overall number of SRAM entries: %u\n\n",
+				   dev->name,
+				   num_sram_entries);
+
+		pr_err("%s: Driver focus switched to DDR %u time(s), to SRAM %u time(s)\n",
+			   dev->name,
+			   nm_ptr->switch2ddr_cnt,
+			   nm_ptr->switch2sram_cnt);
+	}
+
+	IPADBG("Out\n");
+}
+
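+/**
+ * ipa3_read_pdn_table() - dump the IPA v4.0+ PDN config table
+ *
+ * Walks the IPA_MAX_PDN_NUM entries of the PDN table in NAT memory,
+ * skipping zeroed entries and flagging invalid ones.
+ */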
+static void ipa3_read_pdn_table(void)
+{
+	int i, result;
+	char *pdn_entry;
+	size_t pdn_entry_size;
+	bool entry_zeroed;
+	bool entry_valid;
+	char *buff;
+	size_t buff_size = 128;
+
+	IPADBG("In\n");
+
+	if (ipa3_ctx->nat_mem.pdn_mem.base) {
+
+		result = ipahal_nat_entry_size(
+			IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+
+		if (result) {
+			IPAERR("Failed to retrieve size of PDN entry\n");
+			goto bail;
+		}
+
+		buff = kzalloc(buff_size, GFP_KERNEL);
+		if (!buff) {
+			IPAERR("Out of memory\n");
+			goto bail;
+		}
+
+		for (i = 0, pdn_entry = ipa3_ctx->nat_mem.pdn_mem.base;
+			 i < IPA_MAX_PDN_NUM;
+			 ++i, pdn_entry += pdn_entry_size) {
+
+			result = ipahal_nat_is_entry_zeroed(
+				IPAHAL_NAT_IPV4_PDN,
+				pdn_entry, &entry_zeroed);
+
+			if (result) {
+				IPAERR("ipahal_nat_is_entry_zeroed() fail\n");
+				goto free;
+			}
+
+			if (entry_zeroed)
+				continue;
+
+			result = ipahal_nat_is_entry_valid(
+				IPAHAL_NAT_IPV4_PDN,
+				pdn_entry, &entry_valid);
+
+			if (result) {
+				IPAERR(
+					"Failed to determine whether the PDN entry is valid\n");
+				goto free;
+			}
+
+			ipahal_nat_stringify_entry(
+				IPAHAL_NAT_IPV4_PDN,
+				pdn_entry, buff, buff_size);
+
+			if (entry_valid)
+				pr_err("PDN %d: %s\n", i, buff);
+			else
+				pr_err("PDN %d - Invalid: %s\n", i, buff);
+
+			memset(buff, 0, buff_size);
+		}
+		pr_err("\n");
+free:
+		kfree(buff);
+	}
+bail:
+	IPADBG("Out\n");
+}
+
+static ssize_t ipa3_read_nat4(
+	struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+	struct ipa3_nat_mem_loc_data *mld_ptr = NULL;
+
+	u32  rule_id = 0;
+
+	u32 *num_ents_ptr;
+	u32  num_ddr_ents = 0;
+	u32  num_sram_ents = 0;
+
+	u32 *num_index_ents_ptr;
+	u32  num_ddr_index_ents = 0;
+	u32  num_sram_index_ents = 0;
+
+	const char *type_ptr;
+
+	bool any_table_active = (nm_ptr->ddr_in_use || nm_ptr->sram_in_use);
+
+	pr_err("IPA3 NAT stats\n");
+
+	if (!dev->is_dev_init) {
+		pr_err("NAT hasn't been initialized or isn't supported\n");
+		goto ret;
+	}
+
+	mutex_lock(&dev->lock);
+
+	if (!dev->is_hw_init || !any_table_active) {
+		pr_err("NAT H/W and/or S/W not initialized\n");
+		goto bail;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ipa3_read_pdn_table();
+	} else {
+		pr_err("NAT Table IP Address=%pI4h\n\n",
+			   &ipa3_ctx->nat_mem.public_ip_addr);
+	}
+
+	ipa3_start_read_memory_device(
+		dev,
+		IPAHAL_NAT_IPV4,
+		&num_ddr_ents,
+		&num_sram_ents);
+
+	if (nm_ptr->active_table == IPA_NAT_MEM_IN_DDR &&
+		nm_ptr->ddr_in_use) {
+
+		mld_ptr            = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];
+		num_ents_ptr       = &num_ddr_ents;
+		num_index_ents_ptr = &num_ddr_index_ents;
+		type_ptr           = "DDR based table";
+	}
+
+	if (nm_ptr->active_table == IPA_NAT_MEM_IN_SRAM &&
+		nm_ptr->sram_in_use) {
+
+		mld_ptr            = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];
+		num_ents_ptr       = &num_sram_ents;
+		num_index_ents_ptr = &num_sram_index_ents;
+		type_ptr           = "SRAM based table";
+	}
+
+	if (mld_ptr) {
+		/* Print Index tables */
+		pr_err("(%s) ipaNatTable Index Table:\n", type_ptr);
+
+		ipa3_read_table(
+			mld_ptr->index_table_addr,
+			mld_ptr->table_entries + 1,
+			num_index_ents_ptr,
+			&rule_id,
+			IPAHAL_NAT_IPV4_INDEX);
+
+		pr_err("(%s) ipaNatTable Expansion Index Table:\n", type_ptr);
+
+		ipa3_read_table(
+			mld_ptr->index_table_expansion_addr,
+			mld_ptr->expn_table_entries,
+			num_index_ents_ptr,
+			&rule_id,
+			IPAHAL_NAT_IPV4_INDEX);
+
+		if (*num_ents_ptr != *num_index_ents_ptr)
+			IPAERR(
+				"(%s) Base Table vs Index Table entry count differs (%u vs %u)\n",
+				type_ptr, *num_ents_ptr, *num_index_ents_ptr);
+	}
+
+	ipa3_finish_read_memory_device(
+		dev,
+		num_ddr_ents,
+		num_sram_ents);
+
+bail:
+	mutex_unlock(&dev->lock);
+
+ret:
+	IPADBG("Out\n");
+
+	return 0;
+}
+
+static ssize_t ipa3_read_ipv6ct(
+	struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->ipv6ct_mem.dev;
+
+	u32 num_ddr_ents, num_sram_ents;
+
+	num_ddr_ents = num_sram_ents = 0;
+
+	IPADBG("In\n");
+
+	pr_err("\n");
+
+	if (!dev->is_dev_init) {
+		pr_err("IPv6 Conntrack not initialized or not supported\n");
+		goto bail;
+	}
+
+	if (!dev->is_hw_init) {
+		pr_err("IPv6 connection tracking H/W hasn't been initialized\n");
+		goto bail;
+	}
+
+	mutex_lock(&dev->lock);
+
+	ipa3_start_read_memory_device(
+		dev,
+		IPAHAL_NAT_IPV6CT,
+		&num_ddr_ents,
+		&num_sram_ents);
+
+	ipa3_finish_read_memory_device(
+		dev,
+		num_ddr_ents,
+		num_sram_ents);
+
+	mutex_unlock(&dev->lock);
+
+bail:
+	IPADBG("Out\n");
+
+	return 0;
+}
+
+static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, cnt = 0;
+
+	result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"Error in printing PM stat %d\n", result);
+		goto ret;
+	}
+	cnt += result;
+ret:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, cnt = 0;
+
+	result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"Error in printing PM stat %d\n", result);
+		goto ret;
+	}
+	cnt += result;
+ret:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_ipahal_regs(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_print_all_regs(true);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+
+	if (!ipa3_get_wdi_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Failed to read WDI GSI stats\n");
+		cnt += nbytes;
+	}
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_wdi3_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Failed to read WDI3 GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_11ad_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	/* no 11ad GSI stats are collected yet; report EOF */
+	return 0;
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_aqc_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Failed to read AQC GSI stats\n");
+		cnt += nbytes;
+	}
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_mhip_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_CONS RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_TETH_PROD RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_CONS RingUtilCount=%u\n",
+			stats.ring[3].ringFull,
+			stats.ring[3].ringEmpty,
+			stats.ring[3].ringUsageHigh,
+			stats.ring[3].ringUsageLow,
+			stats.ring[3].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringFull=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringEmpty=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageHigh=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageLow=%u\n"
+			"IPA_CLIENT_MHI_PRIME_RMNET_PROD RingUtilCount=%u\n",
+			stats.ring[2].ringFull,
+			stats.ring[2].ringEmpty,
+			stats.ring[2].ringUsageHigh,
+			stats.ring[2].ringUsageLow,
+			stats.ring[2].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Failed to read MHIP GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"This feature is only supported on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	if (!ipa3_get_usb_gsi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n",
+			stats.ring[1].ringFull,
+			stats.ring[1].ringEmpty,
+			stats.ring[1].ringUsageHigh,
+			stats.ring[1].ringUsageLow,
+			stats.ring[1].RingUtilCount);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n",
+			stats.ring[0].ringFull,
+			stats.ring[0].ringEmpty,
+			stats.ring[0].ringUsageHigh,
+			stats.ring[0].ringUsageLow,
+			stats.ring[0].RingUtilCount);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"Failed to read USB GSI stats\n");
+		cnt += nbytes;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_app_clk_vote(
+	struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int cnt =
+		scnprintf(
+			dbg_buff,
+			IPA_MAX_MSG_LEN,
+			"%u\n",
+			ipa3_ctx->app_clock_vote.cnt);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static void ipa_dump_status(struct ipahal_pkt_status *status)
+{
+	IPA_DUMP_STATUS_FIELD(status_opcode);
+	IPA_DUMP_STATUS_FIELD(exception);
+	IPA_DUMP_STATUS_FIELD(status_mask);
+	IPA_DUMP_STATUS_FIELD(pkt_len);
+	IPA_DUMP_STATUS_FIELD(endp_src_idx);
+	IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+	IPA_DUMP_STATUS_FIELD(metadata);
+	IPA_DUMP_STATUS_FIELD(flt_local);
+	IPA_DUMP_STATUS_FIELD(flt_hash);
+	IPA_DUMP_STATUS_FIELD(flt_global);
+	IPA_DUMP_STATUS_FIELD(flt_ret_hdr);
+	IPA_DUMP_STATUS_FIELD(flt_miss);
+	IPA_DUMP_STATUS_FIELD(flt_rule_id);
+	IPA_DUMP_STATUS_FIELD(rt_local);
+	IPA_DUMP_STATUS_FIELD(rt_hash);
+	IPA_DUMP_STATUS_FIELD(ucp);
+	IPA_DUMP_STATUS_FIELD(rt_tbl_idx);
+	IPA_DUMP_STATUS_FIELD(rt_miss);
+	IPA_DUMP_STATUS_FIELD(rt_rule_id);
+	IPA_DUMP_STATUS_FIELD(nat_hit);
+	IPA_DUMP_STATUS_FIELD(nat_entry_idx);
+	IPA_DUMP_STATUS_FIELD(nat_type);
+	pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF);
+	IPA_DUMP_STATUS_FIELD(seq_num);
+	IPA_DUMP_STATUS_FIELD(time_of_day_ctr);
+	IPA_DUMP_STATUS_FIELD(hdr_local);
+	IPA_DUMP_STATUS_FIELD(hdr_offset);
+	IPA_DUMP_STATUS_FIELD(frag_hit);
+	IPA_DUMP_STATUS_FIELD(frag_rule);
+}
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa3_status_stats *stats;
+	int i, j;
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat)
+			continue;
+
+		memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats));
+		pr_err("Statuses for pipe %d\n", i);
+		for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+			pr_err("curr=%d\n", stats->curr);
+			ipa_dump_status(&stats->status[stats->curr]);
+			pr_err("\n\n\n");
+			stats->curr = (stats->curr + 1) %
+				IPA_MAX_STATUS_STAT_NUM;
+		}
+	}
+
+	kfree(stats);
+	return 0;
+}
+
+static ssize_t ipa3_print_active_clients_log(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+	int table_size;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated\n");
+		return 0;
+	}
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
+			IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
+	table_size = ipa3_active_clients_log_print_table(active_clients_buf
+			+ cnt, IPA_MAX_MSG_LEN);
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos,
+			active_clients_buf, cnt + table_size);
+}
+
+static ssize_t ipa3_clear_active_clients_log(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	ipa3_active_clients_log_clear();
+
+	return count;
+}
+
+static ssize_t ipa3_enable_ipc_low(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 option = 0;
+	int ret;
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &option);
+	if (ret)
+		return ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (option) {
+		if (!ipa_ipc_low_buff) {
+			ipa_ipc_low_buff =
+				ipc_log_context_create(IPA_IPC_LOG_PAGES,
+					"ipa_low", 0);
+		}
+		if (ipa_ipc_low_buff == NULL)
+			IPADBG("failed to get logbuf_low\n");
+		ipa3_ctx->logbuf_low = ipa_ipc_low_buff;
+	} else {
+		ipa3_ctx->logbuf_low = NULL;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return count;
+}
+
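+/*
+ * Table binding each debugfs node to its name, access mode, private data
+ * (the IP family for the flt/rt nodes) and file operations.
+ * ipa3_debugfs_init() iterates this table when populating the "ipa"
+ * debugfs directory.
+ */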
+static const struct ipa3_debugfs_file debugfs_files[] = {
+	{
+		"gen_reg", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_gen_reg
+		}
+	}, {
+		"active_clients", IPA_READ_WRITE_MODE, NULL, {
+			.read = ipa3_print_active_clients_log,
+			.write = ipa3_clear_active_clients_log
+		}
+	}, {
+		"ep_reg", IPA_READ_WRITE_MODE, NULL, {
+			.read = ipa3_read_ep_reg,
+			.write = ipa3_write_ep_reg,
+		}
+	}, {
+		"keep_awake", IPA_READ_WRITE_MODE, NULL, {
+			.read = ipa3_read_keep_awake,
+			.write = ipa3_write_keep_awake,
+		}
+	}, {
+		"holb", IPA_WRITE_ONLY_MODE, NULL, {
+			.write = ipa3_write_ep_holb,
+		}
+	}, {
+		"hdr", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_hdr,
+		}
+	}, {
+		"proc_ctx", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_proc_ctx,
+		}
+	}, {
+		"ip4_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+			.read = ipa3_read_rt,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip4_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+			.read = ipa3_read_rt_hw,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip6_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+			.read = ipa3_read_rt,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip6_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+			.read = ipa3_read_rt_hw,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip4_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+			.read = ipa3_read_flt,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip4_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+			.read = ipa3_read_flt_hw,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip6_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+			.read = ipa3_read_flt,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"ip6_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+			.read = ipa3_read_flt_hw,
+			.open = ipa3_open_dbg,
+		}
+	}, {
+		"stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_stats,
+		}
+	}, {
+		"wstats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wstats,
+		}
+	}, {
+		"odlstats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_odlstats,
+		}
+	}, {
+		"page_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_page_recycle_stats,
+		}
+	}, {
+		"wdi", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wdi,
+		}
+	}, {
+		"ntn", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_ntn,
+		}
+	}, {
+		"dbg_cnt", IPA_READ_WRITE_MODE, NULL, {
+			.read = ipa3_read_dbg_cnt,
+			.write = ipa3_write_dbg_cnt,
+		}
+	}, {
+		"msg", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_msg,
+		}
+	}, {
+		"ip4_nat", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_nat4,
+		}
+	}, {
+		"ipv6ct", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_ipv6ct,
+		}
+	}, {
+		"pm_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_pm_read_stats,
+		}
+	}, {
+		"pm_ex_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_pm_ex_read_stats,
+		}
+	}, {
+		"status_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa_status_stats_read,
+		}
+	}, {
+		"enable_low_prio_print", IPA_WRITE_ONLY_MODE, NULL, {
+			.write = ipa3_enable_ipc_low,
+		}
+	}, {
+		"ipa_dump_regs", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_ipahal_regs,
+		}
+	}, {
+		"wdi_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wdi_gsi_stats,
+		}
+	}, {
+		"wdi3_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_wdi3_gsi_stats,
+		}
+	}, {
+		"11ad_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_11ad_gsi_stats,
+		}
+	}, {
+		"aqc_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_aqc_gsi_stats,
+		}
+	}, {
+		"mhip_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_mhip_gsi_stats,
+		}
+	}, {
+		"usb_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_usb_gsi_stats,
+		}
+	}, {
+		"app_clk_vote_cnt", IPA_READ_ONLY_MODE, NULL, {
+			.read = ipa3_read_app_clk_vote,
+		}
+	},
+};
+
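+/**
+ * ipa3_debugfs_init() - create the "ipa" debugfs directory and its nodes
+ *
+ * Creates the files listed in debugfs_files[], a few direct u32 nodes
+ * (hw_type, the clock-scaling knobs, clk_rate) and the active-clients
+ * buffer, then delegates to the stats and WIGIG debugfs initializers.
+ * On any failure the whole directory is removed.
+ */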
+void ipa3_debugfs_init(void)
+{
+	const size_t debugfs_files_num = ARRAY_SIZE(debugfs_files);
+	size_t i;
+	struct dentry *file;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	file = debugfs_create_u32("hw_type", IPA_READ_ONLY_MODE,
+		dent, &ipa3_ctx->ipa_hw_type);
+	if (!file) {
+		IPAERR("could not create hw_type file\n");
+		goto fail;
+	}
+
+	for (i = 0; i < debugfs_files_num; ++i) {
+		const struct ipa3_debugfs_file *curr = &debugfs_files[i];
+
+		file = debugfs_create_file(curr->name, curr->mode, dent,
+			curr->data, &curr->fops);
+		if (!file || IS_ERR(file)) {
+			IPAERR("fail to create file for debug_fs %s\n",
+				curr->name);
+			goto fail;
+		}
+	}
+
+	active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
+			GFP_KERNEL);
+	if (active_clients_buf == NULL)
+		goto fail;
+
+	file = debugfs_create_u32("enable_clock_scaling", IPA_READ_WRITE_MODE,
+		dent, &ipa3_ctx->enable_clock_scaling);
+	if (!file) {
+		IPAERR("could not create enable_clock_scaling file\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("enable_napi_chain", IPA_READ_WRITE_MODE,
+		dent, &ipa3_ctx->enable_napi_chain);
+	if (!file) {
+		IPAERR("could not create enable_napi_chain file\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+		IPA_READ_WRITE_MODE, dent,
+		&ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+	if (!file) {
+		IPAERR("could not create bw_threshold_nominal_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+			IPA_READ_WRITE_MODE, dent,
+			&ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+	if (!file) {
+		IPAERR("could not create bw_threshold_turbo_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE,
+		dent, &ipa3_ctx->curr_ipa_clk_rate);
+	if (!file) {
+		IPAERR("could not create clk_rate file\n");
+		goto fail;
+	}
+
+	ipa_debugfs_init_stats(dent);
+
+	ipa3_wigig_init_debugfs_i(dent);
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+void ipa3_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("Debugfs:folder was not created.\n");
+		return;
+	}
+	if (active_clients_buf != NULL) {
+		kfree(active_clients_buf);
+		active_clients_buf = NULL;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+struct dentry *ipa_debugfs_get_root(void)
+{
+	return dent;
+}
+EXPORT_SYMBOL(ipa_debugfs_get_root);
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa3_debugfs_init(void) {}
+void ipa3_debugfs_remove(void) {}
+#endif

+ 94 - 0
ipa/ipa_v3/ipa_defs.h

@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_DEFS_H_
+#define _IPA_DEFS_H_
+#include <linux/ipa.h>
+
+/**
+ * struct ipa_rt_rule_i - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header;
+ *  it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. If it is provided,
+ *  hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. Does this rule have max priority? If so, on a
+ *  rule hit IPA will use this rule and will not look for other rules that
+ *  may have higher priority
+ * @hashable: bool switch. Is this rule hashable or not?
+ *  IPA caches the hit results of hashable rules for use with
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @coalesce: bool to decide whether packets should be coalesced or not
+ * @enable_stats: true when stats should be enabled for this
+ * rt rule.
+ * @cnt_idx: if enable_stats is 1 and cnt_idx is 0, then cnt_idx
+ * will be assigned by the IPA driver.
+ */
+struct ipa_rt_rule_i {
+	enum ipa_client_type dst;
+	u32 hdr_hdl;
+	u32 hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	u8 max_prio;
+	u8 hashable;
+	u8 retain_hdr;
+	u8 coalesce;
+	u8 enable_stats;
+	u8 cnt_idx;
+};
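+
+/*
+ * Illustrative sketch only (not part of this header): a client building
+ * a minimal hashable routing rule. All field values here are hypothetical.
+ *
+ *	struct ipa_rt_rule_i rule = {
+ *		.dst = IPA_CLIENT_APPS_LAN_CONS,
+ *		.hashable = 1,
+ *		.enable_stats = 1,
+ *		.cnt_idx = 0,	// 0: the driver assigns a counter index
+ *	};
+ */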
+
+/**
+ * struct ipa_flt_rule_i - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. Does this rule have max priority? If so, on a
+ *  rule hit IPA will use this rule and will not look for other rules that
+ *  may have higher priority
+ * @hashable: bool switch. Is this rule hashable or not?
+ *  IPA caches the hit results of hashable rules for use with
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source/destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ * @enable_stats: true when stats should be enabled for this
+ * flt rule.
+ * @cnt_idx: counter index. If enable_stats is 1 and cnt_idx is 0,
+ * cnt_idx will be assigned by the IPA driver.
+ */
+struct ipa_flt_rule_i {
+	u8 retain_hdr;
+	u8 to_uc;
+	enum ipa_flt_action action;
+	u32 rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rt_tbl_idx;
+	u8 eq_attrib_type;
+	u8 max_prio;
+	u8 hashable;
+	u16 rule_id;
+	u8 set_metadata;
+	u8 pdn_idx;
+	u8 enable_stats;
+	u8 cnt_idx;
+};
+
+#endif /* _IPA_DEFS_H_ */

+ 1243 - 0
ipa/ipa_v3/ipa_dma.c

@@ -0,0 +1,1243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "linux/msm_gsi.h"
+#include <linux/dmapool.h>
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
+#define IPA_DMA_DUMMY_BUFF_SZ 8
+#define IPA_DMA_PREFETCH_WA_THRESHOLD 9
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_ERR(fmt, args...) \
+	do { \
+		pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_FUNC_ENTRY() \
+	IPADMA_DBG_LOW("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+	IPADMA_DBG_LOW("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa3_dma_debugfs_init(void);
+static void ipa3_dma_debugfs_destroy(void);
+#else
+static void ipa3_dma_debugfs_init(void) {}
+static void ipa3_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa3_dma_ctx - IPADMA driver context information
+ * @enable_ref_cnt: ipa dma enable reference count
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronisation in sync_memcpy
+ * @async_lock: lock for synchronisation in async_memcpy
+ * @enable_lock: lock for enable_ref_cnt
+ * @pending_lock: lock synchronising enable_ref_cnt and the pending counters
+ * @done: no pending work - ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcopy operations
+ * @async_memcpy_pending_cnt: number of pending async memcopy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcopy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ * @ipa_dma_dummy_src_sync: dummy source buffer for the sync prefetch WA
+ * @ipa_dma_dummy_dst_sync: dummy destination buffer for the sync prefetch WA
+ * @ipa_dma_dummy_src_async: dummy source buffer for the async prefetch WA
+ * @ipa_dma_dummy_dst_async: dummy destination buffer for the async prefetch WA
+ */
+struct ipa3_dma_ctx {
+	unsigned int enable_ref_cnt;
+	bool destroy_pending;
+	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+	struct mutex sync_lock;
+	spinlock_t async_lock;
+	struct mutex enable_lock;
+	spinlock_t pending_lock;
+	struct completion done;
+	u32 ipa_dma_sync_prod_hdl;
+	u32 ipa_dma_async_prod_hdl;
+	u32 ipa_dma_sync_cons_hdl;
+	u32 ipa_dma_async_cons_hdl;
+	atomic_t sync_memcpy_pending_cnt;
+	atomic_t async_memcpy_pending_cnt;
+	atomic_t uc_memcpy_pending_cnt;
+	atomic_t total_sync_memcpy;
+	atomic_t total_async_memcpy;
+	atomic_t total_uc_memcpy;
+	struct ipa_mem_buffer ipa_dma_dummy_src_sync;
+	struct ipa_mem_buffer ipa_dma_dummy_dst_sync;
+	struct ipa_mem_buffer ipa_dma_dummy_src_async;
+	struct ipa_mem_buffer ipa_dma_dummy_dst_async;
+};
+static struct ipa3_dma_ctx *ipa3_dma_ctx;
+
+/**
+ * struct ipa3_dma_init_refcnt_ctrl - IPADMA driver init control information
+ * @ref_cnt: reference count for initialization operations
+ * @lock: lock for the reference count
+ */
+struct ipa3_dma_init_refcnt_ctrl {
+	unsigned int ref_cnt;
+	struct mutex lock;
+};
+static struct ipa3_dma_init_refcnt_ctrl *ipa3_dma_init_refcnt_ctrl;
+
+/**
+ * ipa3_dma_setup() - One time setup for IPA DMA
+ *
+ * This function should be called once to set up ipa dma
+ *  by creating the init reference count controller
+ *
+ * Return codes: 0: success
+ *		 Negative value: failure
+ */
+int ipa3_dma_setup(void)
+{
+	IPADMA_FUNC_ENTRY();
+
+	if (ipa3_dma_init_refcnt_ctrl) {
+		IPADMA_ERR("Setup already done\n");
+		return -EFAULT;
+	}
+
+	ipa3_dma_init_refcnt_ctrl =
+		kzalloc(sizeof(*(ipa3_dma_init_refcnt_ctrl)), GFP_KERNEL);
+
+	if (!ipa3_dma_init_refcnt_ctrl) {
+		IPADMA_ERR("kzalloc error.\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ipa3_dma_init_refcnt_ctrl->lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa3_dma_shutdown() - Clear setup operations.
+ *
+ * Cleanup for the setup function.
+ * Should be called during IPA driver unloading.
+ * It assumes all ipa_dma operations are done and ipa_dma is destroyed.
+ *
+ * Return codes: None.
+ */
+void ipa3_dma_shutdown(void)
+{
+	IPADMA_FUNC_ENTRY();
+
+	if (!ipa3_dma_init_refcnt_ctrl)
+		return;
+
+	kfree(ipa3_dma_init_refcnt_ctrl);
+	ipa3_dma_init_refcnt_ctrl = NULL;
+
+	IPADMA_FUNC_EXIT();
+}
+
+/**
+ * ipa3_dma_init() - Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the pipes:
+ *	MEMCPY_DMA_SYNC_PROD  -> MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD -> MEMCPY_DMA_ASYNC_CONS
+ *
+ * Can be executed several times (re-entrant)
+ *
+ * Return codes: 0: success
+ *		-EFAULT: Mismatch between context existence and init ref_cnt
+ *		-EINVAL: IPA driver is not initialized
+ *		-ENOMEM: allocating memory error
+ *		-EPERM: pipe connection failed
+ */
+int ipa3_dma_init(void)
+{
+	struct ipa3_dma_ctx *ipa_dma_ctx_t;
+	struct ipa_sys_connect_params sys_in;
+	int res = 0;
+	int sync_sz;
+	int async_sz;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (!ipa3_dma_init_refcnt_ctrl) {
+		IPADMA_ERR("Setup isn't done yet!\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
+	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 0) {
+		IPADMA_DBG("Already initialized refcnt=%d\n",
+			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+		if (!ipa3_dma_ctx) {
+			IPADMA_ERR("Context missing. refcnt=%d\n",
+				ipa3_dma_init_refcnt_ctrl->ref_cnt);
+			res = -EFAULT;
+		} else {
+			ipa3_dma_init_refcnt_ctrl->ref_cnt++;
+		}
+		goto init_unlock;
+	}
+
+	if (ipa3_dma_ctx) {
+		IPADMA_ERR("Context already exist\n");
+		res = -EFAULT;
+		goto init_unlock;
+	}
+
+	if (!ipa3_is_ready()) {
+		IPADMA_ERR("IPA is not ready yet\n");
+		res = -EINVAL;
+		goto init_unlock;
+	}
+
+	ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);
+
+	if (!ipa_dma_ctx_t) {
+		res = -ENOMEM;
+		goto init_unlock;
+	}
+
+	ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+		kmem_cache_create("IPA DMA XFER WRAPPER",
+			sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL);
+	if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+		IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+		res = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+
+	mutex_init(&ipa_dma_ctx_t->enable_lock);
+	spin_lock_init(&ipa_dma_ctx_t->async_lock);
+	mutex_init(&ipa_dma_ctx_t->sync_lock);
+	spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+	init_completion(&ipa_dma_ctx_t->done);
+	ipa_dma_ctx_t->enable_ref_cnt = 0;
+	ipa_dma_ctx_t->destroy_pending = false;
+	atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+	sync_sz = IPA_SYS_DESC_FIFO_SZ;
+	async_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	/*
+	 * for ipav3.5 we need to double the rings and allocate dummy buffers
+	 * in order to apply the prefetch WA
+	 */
+	if (ipa_get_hw_type() == IPA_HW_v3_5) {
+		sync_sz *= 2;
+		async_sz *= 2;
+
+		ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base =
+			dma_alloc_coherent(ipa3_ctx->pdev,
+			IPA_DMA_DUMMY_BUFF_SZ * 4,
+			&ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base,
+			GFP_KERNEL);
+
+		if (!ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base) {
+			IPAERR("DMA alloc fail %d bytes for prefetch WA\n",
+				IPA_DMA_DUMMY_BUFF_SZ);
+			res = -ENOMEM;
+			goto fail_alloc_dummy;
+		}
+
+		ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base =
+			ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+		ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base =
+			ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+		ipa_dma_ctx_t->ipa_dma_dummy_src_async.base =
+			ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+		ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base =
+			ipa_dma_ctx_t->ipa_dma_dummy_dst_sync.phys_base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+		ipa_dma_ctx_t->ipa_dma_dummy_dst_async.base =
+			ipa_dma_ctx_t->ipa_dma_dummy_src_async.base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+		ipa_dma_ctx_t->ipa_dma_dummy_dst_async.phys_base =
+			ipa_dma_ctx_t->ipa_dma_dummy_src_async.phys_base +
+			IPA_DMA_DUMMY_BUFF_SZ;
+	}
+
+	/* IPADMA SYNC PROD-source for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+	sys_in.desc_fifo_sz = sync_sz;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+		IPADMA_ERR(":setup sync prod pipe failed\n");
+		res = -EPERM;
+		goto fail_sync_prod;
+	}
+
+	/* IPADMA SYNC CONS-destination for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.desc_fifo_sz = sync_sz;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = NULL;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+		IPADMA_ERR(":setup sync cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_sync_cons;
+	}
+
+	IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD-source for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+	sys_in.desc_fifo_sz = async_sz;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	sys_in.notify = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+		IPADMA_ERR(":setup async prod pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_prod;
+	}
+
+	/* IPADMA ASYNC CONS-destination for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.desc_fifo_sz = async_sz;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+		IPADMA_ERR(":setup async cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_cons;
+	}
+	ipa3_dma_debugfs_init();
+	ipa3_dma_ctx = ipa_dma_ctx_t;
+	ipa3_dma_init_refcnt_ctrl->ref_cnt = 1;
+	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+	IPADMA_FUNC_EXIT();
+	goto init_unlock;
+
+fail_async_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+	dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4,
+		ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base,
+		ipa_dma_ctx_t->ipa_dma_dummy_src_sync.phys_base);
+fail_alloc_dummy:
+	kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+	kfree(ipa_dma_ctx_t);
+	ipa3_dma_ctx = NULL;
+init_unlock:
+	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
+	return res;
+
+}
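+
+/*
+ * Typical (illustrative) life cycle of the refcounted API in this file,
+ * from a hypothetical client:
+ *
+ *	if (!ipa3_dma_init()) {		// pipes connected, init ref_cnt++
+ *		ipa3_dma_enable();	// vote for IPA clocks
+ *		...memcpy operations...
+ *		ipa3_dma_disable();	// unvote clocks
+ *		ipa3_dma_destroy();	// ref_cnt--, teardown on last ref
+ *	}
+ */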
+
+/**
+ * ipa3_dma_enable() - Vote for IPA clocks.
+ *
+ * Can be executed several times (re-entrant)
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ */
+int ipa3_dma_enable(void)
+{
+	IPADMA_FUNC_ENTRY();
+	if ((ipa3_dma_ctx == NULL) ||
+		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
+		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	if (ipa3_dma_ctx->enable_ref_cnt > 0) {
+		IPADMA_ERR("Already enabled refcnt=%d\n",
+			ipa3_dma_ctx->enable_ref_cnt);
+		ipa3_dma_ctx->enable_ref_cnt++;
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return 0;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+	ipa3_dma_ctx->enable_ref_cnt = 1;
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa3_dma_work_pending(void)
+{
+	if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending sync\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending async\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending uc\n");
+		return true;
+	}
+	IPADMA_DBG_LOW("no pending work\n");
+	return false;
+}
+
+/**
+ * ipa3_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: cannot disable ipa_dma as there is pending
+ *			memcopy work
+ */
+int ipa3_dma_disable(void)
+{
+	unsigned long flags;
+	int res = 0;
+	bool dec_clks = false;
+
+	IPADMA_FUNC_ENTRY();
+	if ((ipa3_dma_ctx == NULL) ||
+		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
+		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (ipa3_dma_ctx->enable_ref_cnt > 1) {
+		IPADMA_DBG("Multiple enablement done. refcnt=%d\n",
+			ipa3_dma_ctx->enable_ref_cnt);
+		ipa3_dma_ctx->enable_ref_cnt--;
+		goto completed;
+	}
+
+	if (ipa3_dma_ctx->enable_ref_cnt == 0) {
+		IPADMA_ERR("Already disabled\n");
+		res = -EPERM;
+		goto completed;
+	}
+
+	if (ipa3_dma_work_pending()) {
+		IPADMA_ERR("There is pending work, can't disable.\n");
+		res = -EFAULT;
+		goto completed;
+	}
+	ipa3_dma_ctx->enable_ref_cnt = 0;
+	dec_clks = true;
+	IPADMA_FUNC_EXIT();
+
+completed:
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	if (dec_clks)
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+	return res;
+}
+
+/**
+ * ipa3_dma_sync_memcpy() - Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-gsi_status: on GSI failures
+ *		-EFAULT: other
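+ *
+ * Illustrative call (hedged sketch; both addresses are assumed to be
+ * DMA-able physical addresses, e.g. from dma_alloc_coherent()):
+ *
+ *	if (ipa3_dma_sync_memcpy(dst_phys, src_phys, len))
+ *		pr_err("ipa dma copy failed\n");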
+ */
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ep_idx;
+	int res;
+	int i = 0;
+	struct ipa3_sys_context *cons_sys;
+	struct ipa3_sys_context *prod_sys;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_dma_xfer_wrapper *head_descr = NULL;
+	struct gsi_xfer_elem prod_xfer_elem;
+	struct gsi_xfer_elem cons_xfer_elem;
+	struct gsi_chan_xfer_notify gsi_notify;
+	unsigned long flags;
+	bool stop_polling = false;
+	bool prefetch_wa = false;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
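+	/*
+	 * If the distance between the two start addresses is smaller than
+	 * len, the ranges [src, src+len) and [dest, dest+len) overlap.
+	 */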
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->enable_ref_cnt) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	init_completion(&xfer_descr->xfer_done);
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	cons_xfer_elem.addr = dest;
+	cons_xfer_elem.len = len;
+	cons_xfer_elem.type = GSI_XFER_ELEM_DATA;
+	cons_xfer_elem.flags = GSI_XFER_FLAG_EOT;
+
+	prod_xfer_elem.addr = src;
+	prod_xfer_elem.len = len;
+	prod_xfer_elem.type = GSI_XFER_ELEM_DATA;
+	prod_xfer_elem.xfer_user_data = NULL;
+
+	/*
+	 * when copy is less than 9B we need to chain another dummy
+	 * copy so the total size will be larger (for ipav3.5)
+	 * for the consumer we have to prepare an additional credit
+	 */
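+	/*
+	 * Resulting element layout for the WA (sketch of the code below):
+	 *   prod ring: [src, CHAIN]  [dummy_src, EOT]
+	 *   cons ring: [dest, EOT]   [dummy_dst, EOT]
+	 * Only the dummy dest element carries xfer_descr as user data, so
+	 * the completion polled for below corresponds to the dummy transfer.
+	 */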
+	prefetch_wa = ((ipa_get_hw_type() == IPA_HW_v3_5) &&
+		len < IPA_DMA_PREFETCH_WA_THRESHOLD);
+	if (prefetch_wa) {
+		cons_xfer_elem.xfer_user_data = NULL;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&cons_xfer_elem, false);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dest descr res:%d\n",
+				res);
+			goto fail_send;
+		}
+		cons_xfer_elem.addr =
+			ipa3_dma_ctx->ipa_dma_dummy_dst_sync.phys_base;
+		cons_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ;
+		cons_xfer_elem.type = GSI_XFER_ELEM_DATA;
+		cons_xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		cons_xfer_elem.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&cons_xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dummy dest descr res:%d\n",
+				res);
+			goto fail_send;
+		}
+		prod_xfer_elem.flags = GSI_XFER_FLAG_CHAIN;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+			&prod_xfer_elem, false);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer src descr res:%d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+		prod_xfer_elem.addr =
+			ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base;
+		prod_xfer_elem.len = IPA_DMA_DUMMY_BUFF_SZ;
+		prod_xfer_elem.type = GSI_XFER_ELEM_DATA;
+		prod_xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		prod_xfer_elem.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+				&prod_xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dummy src descr res:%d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+	} else {
+		cons_xfer_elem.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&cons_xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dest descr res:%d\n",
+				res);
+			goto fail_send;
+		}
+		prod_xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+			&prod_xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+			"Failed: gsi_queue_xfer src descr res:%d\n",
+			 res);
+			ipa_assert();
+			goto fail_send;
+		}
+	}
+	head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+
+	/* in case we are not the head of the list, wait for head to wake us */
+	if (xfer_descr != head_descr) {
+		mutex_unlock(&ipa3_dma_ctx->sync_lock);
+		wait_for_completion(&xfer_descr->xfer_done);
+		mutex_lock(&ipa3_dma_ctx->sync_lock);
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+					struct ipa3_dma_xfer_wrapper, link);
+		/* Unexpected transfer sent from HW */
+		ipa_assert_on(xfer_descr != head_descr);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	do {
+		/* wait for transfer to complete */
+		res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
+			&gsi_notify);
+		if (res == GSI_STATUS_SUCCESS)
+			stop_polling = true;
+		else if (res != GSI_STATUS_POLL_EMPTY)
+			IPADMA_ERR(
+				"Failed: gsi_poll_chanel, returned %d loop#:%d\n",
+				res, i);
+		usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+			IPA_DMA_POLLING_MAX_SLEEP_RX);
+		i++;
+	} while (!stop_polling);
+
+	/* for prefetch WA we will receive the length of the dummy
+	 * transfer in the event (because it is the second element)
+	 */
+	if (prefetch_wa)
+		ipa_assert_on(gsi_notify.bytes_xfered !=
+			IPA_DMA_DUMMY_BUFF_SZ);
+	else
+		ipa_assert_on(len != gsi_notify.bytes_xfered);
+
+	ipa_assert_on(dest != ((struct ipa3_dma_xfer_wrapper *)
+			(gsi_notify.xfer_user_data))->phys_addr_dest);
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_del(&head_descr->link);
+	cons_sys->len--;
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+	/* wake the head of the list */
+	if (!list_empty(&cons_sys->head_desc_list)) {
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+		complete(&head_descr->xfer_done);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	atomic_inc(&ipa3_dma_ctx->total_sync_memcpy);
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	cons_sys->len--;
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
+
+/**
+ * ipa3_dma_async_memcpy() - Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-gsi_status: on GSI failures
+ *		-EFAULT: descr fifo is full.
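+ *
+ * Illustrative call with a hypothetical completion callback:
+ *
+ *	static void copy_done(void *user)
+ *	{
+ *		complete((struct completion *)user);
+ *	}
+ *	...
+ *	ipa3_dma_async_memcpy(dst_phys, src_phys, len, copy_done, &comp);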
+ */
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ep_idx;
+	int res = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_sys_context *prod_sys;
+	struct ipa3_sys_context *cons_sys;
+	struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (!user_cb) {
+		IPADMA_ERR("null pointer: user_cb\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->enable_ref_cnt) {
+		IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	xfer_descr->callback = user_cb;
+	xfer_descr->user1 = user_param;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	/*
+	 * when copy is less than 9B we need to chain another dummy
+	 * copy so the total size will be larger (for ipav3.5)
+	 */
+	if ((ipa_get_hw_type() == IPA_HW_v3_5) && len <
+		IPA_DMA_PREFETCH_WA_THRESHOLD) {
+		xfer_elem_cons.addr = dest;
+		xfer_elem_cons.len = len;
+		xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_cons.xfer_user_data = NULL;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_cons, false);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dest descr res: %d\n",
+				res);
+			goto fail_send;
+		}
+		xfer_elem_cons.addr =
+			ipa3_dma_ctx->ipa_dma_dummy_dst_async.phys_base;
+		xfer_elem_cons.len = IPA_DMA_DUMMY_BUFF_SZ;
+		xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_cons.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_cons, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dummy dest descr res: %d\n",
+				res);
+			goto fail_send;
+		}
+
+		xfer_elem_prod.addr = src;
+		xfer_elem_prod.len = len;
+		xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_prod.flags = GSI_XFER_FLAG_CHAIN;
+		xfer_elem_prod.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_prod, false);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on src descr res: %d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+		xfer_elem_prod.addr =
+			ipa3_dma_ctx->ipa_dma_dummy_src_async.phys_base;
+		xfer_elem_prod.len = IPA_DMA_DUMMY_BUFF_SZ;
+		xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_prod.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_prod, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dummy src descr res: %d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+	} else {
+		xfer_elem_cons.addr = dest;
+		xfer_elem_cons.len = len;
+		xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_cons.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_cons, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dest descr res: %d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+		xfer_elem_prod.addr = src;
+		xfer_elem_prod.len = len;
+		xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_prod.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+			&xfer_elem_prod, true);
+		if (res) {
+			IPADMA_ERR(
+					"Failed: gsi_queue_xfer on dummy src descr res: %d\n",
+				res);
+			ipa_assert();
+			goto fail_send;
+		}
+	}
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
+
+/**
+ * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->enable_ref_cnt) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+	res = ipa3_uc_memcpy(dest, src, len);
+	if (res) {
+		IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res);
+		goto dec_and_exit;
+	}
+
+	atomic_inc(&ipa3_dma_ctx->total_uc_memcpy);
+	res = 0;
+dec_and_exit:
+	atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	IPADMA_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * ipa3_dma_destroy() - teardown IPADMA pipes and release ipadma.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa3_dma_destroy(void)
+{
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (!ipa3_dma_init_refcnt_ctrl) {
+		IPADMA_ERR("Setup isn't done\n");
+		return;
+	}
+
+	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
+	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 1) {
+		IPADMA_DBG("Multiple initialization done. refcnt=%d\n",
+			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+		ipa3_dma_init_refcnt_ctrl->ref_cnt--;
+		goto completed;
+	}
+
+	if ((!ipa3_dma_ctx) || (ipa3_dma_init_refcnt_ctrl->ref_cnt == 0)) {
+		IPADMA_ERR("IPADMA isn't initialized ctx=%pK\n", ipa3_dma_ctx);
+		goto completed;
+	}
+
+	if (ipa3_dma_work_pending()) {
+		ipa3_dma_ctx->destroy_pending = true;
+		IPADMA_DBG("There are pending memcpy, wait for completion\n");
+		wait_for_completion(&ipa3_dma_ctx->done);
+	}
+
+	if (ipa3_dma_ctx->enable_ref_cnt > 0) {
+		IPADMA_ERR("IPADMA still enabled\n");
+		goto completed;
+	}
+
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+	ipa3_dma_debugfs_destroy();
+	kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache);
+	dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4,
+		ipa3_dma_ctx->ipa_dma_dummy_src_sync.base,
+		ipa3_dma_ctx->ipa_dma_dummy_src_sync.phys_base);
+	kfree(ipa3_dma_ctx);
+	ipa3_dma_ctx = NULL;
+
+	ipa3_dma_init_refcnt_ctrl->ref_cnt = 0;
+	IPADMA_FUNC_EXIT();
+
+completed:
+	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
+}
+
+/**
+ * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called
+ * by the IPA driver after getting notified that an Rx operation completed
+ * (data was written to the dest descriptor on the async_cons ep).
+ *
+ * @priv: not in use.
+ * @evt: event name - IPA_RECEIVE.
+ * @data: the ipa_mem_buffer.
+ */
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+			enum ipa_dp_evt_type evt, unsigned long data)
+{
+	int ep_idx = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
+	struct ipa3_sys_context *sys;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	if (ep_idx < 0) {
+		IPADMA_ERR("IPA Client mapping failed\n");
+		return;
+	}
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+				 struct ipa3_dma_xfer_wrapper, link);
+	list_del(&xfer_descr_expected->link);
+	sys->len--;
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+		xfer_descr_expected);
+
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+
+	if (!ipa3_dma_init_refcnt_ctrl) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Setup was not done\n");
+		goto completed;
+
+	}
+
+	if (!ipa3_dma_ctx) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Status:\n	Not initialized (ref_cnt=%d)\n",
+			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+	} else {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Status:\n	Initialized (ref_cnt=%d)\n",
+			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"	%s (ref_cnt=%d)\n",
+			(ipa3_dma_ctx->enable_ref_cnt > 0) ?
+			"Enabled" : "Disabled",
+			ipa3_dma_ctx->enable_ref_cnt);
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Statistics:\n	total sync memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_sync_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"total async memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_async_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"total uc memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_uc_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending sync memcpy jobs: %d\n	",
+			atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending async memcpy jobs: %d\n	",
+			atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending uc memcpy jobs: %d\n",
+			atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
+	}
+
+completed:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	s8 in_num = 0;
+	int ret;
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &in_num);
+	if (ret)
+		return ret;
+
+	switch (in_num) {
+	case 0:
+		if (ipa3_dma_work_pending())
+			IPADMA_ERR("Note, there are pending memcpy\n");
+
+		atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0);
+		atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0);
+		break;
+	default:
+		IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+		break;
+	}
+	return count;
+}
+
+const struct file_operations ipa3_ipadma_stats_ops = {
+	.read = ipa3_dma_debugfs_read,
+	.write = ipa3_dma_debugfs_reset_statistics,
+};
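+
+/*
+ * Assuming debugfs is mounted at /sys/kernel/debug, the node above is
+ * exposed as /sys/kernel/debug/ipa_dma/info: reading it dumps the init,
+ * enable and counter state, and writing '0' to it resets the sync/async
+ * memcpy statistics.
+ */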
+
+static void ipa3_dma_debugfs_init(void)
+{
+	const mode_t read_write_mode = 0666;
+
+	dent = debugfs_create_dir("ipa_dma", 0);
+	if (IS_ERR(dent)) {
+		IPADMA_ERR("fail to create folder ipa_dma\n");
+		return;
+	}
+
+	dfile_info =
+		debugfs_create_file("info", read_write_mode, dent,
+				 0, &ipa3_ipadma_stats_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		IPADMA_ERR("fail to create file stats\n");
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa3_dma_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */

+ 5140 - 0
ipa/ipa_v3/ipa_dp.c

@@ -0,0 +1,5140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/msm_gsi.h>
+#include <net/sock.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_WAN_AGGR_PKT_CNT 5
+#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
+#define IPA_WAN_PAGE_ORDER 3
+#define IPA_LAN_AGGR_PKT_CNT 5
+#define IPA_LAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_LAN_AGGR_PKT_CNT)
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_MIN_SLEEP_RX 1010
+#define POLLING_MAX_SLEEP_RX 1050
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+#define SUSPEND_MIN_SLEEP_RX 1000
+#define SUSPEND_MAX_SLEEP_RX 1005
+#define IPA_MTU 1500
+/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 500 /* 0.5msec */
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GSB_AGGR_BYTE_LIMIT 14
+#define IPA_GSB_RX_BUFF_BASE_SZ 16384
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+		(X) + NET_SKB_PAD) +\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+		IPA_REAL_GENERIC_RX_BUFF_SZ(\
+		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+		IPA_GENERIC_RX_BUFF_BASE_SZ)
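+
+/*
+ * Note on the macros above: IPA_REAL_GENERIC_RX_BUFF_SZ() is the size the
+ * kernel really ends up allocating for X payload bytes (payload plus
+ * NET_SKB_PAD headroom plus struct skb_shared_info, each aligned by
+ * SKB_DATA_ALIGN). IPA_GENERIC_RX_BUFF_SZ() therefore shrinks the requested
+ * payload so that the real allocation still fits the 8KB base size and a
+ * higher-order allocation is avoided; the exact usable size is arch
+ * dependent.
+ */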
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 64
+
+#define IPA_ODL_RX_BUFF_SZ (16 * 1024)
+
+#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
+#define IPA_GSI_EVT_RING_INT_MODT (16) /* 0.5ms under 32KHz clock */
+#define IPA_GSI_EVT_RING_INT_MODC (20)
+
+#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
+/* The below virtual channel cannot be used by any entity */
+#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+#define IPA_REPL_XFER_THRESH 20
+#define IPA_REPL_XFER_MAX 36
+
+#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
+
+#define IPA_APPS_BW_FOR_PM 700
+
+#define IPA_SEND_MAX_DESC (20)
+
+#define IPA_EOT_THRESH 32
+
+#define IPA_QMAP_ID_BYTE 0
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_work_func(struct work_struct *work);
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
+static void ipa3_wq_page_repl(struct work_struct *work);
+static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys);
+static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(gfp_t flag,
+	bool is_tmp_alloc);
+static void ipa3_wq_handle_rx(struct work_struct *work);
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify);
+static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
+		struct gsi_chan_xfer_notify *notify, uint32_t num);
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+				struct gsi_chan_xfer_notify *notify);
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys);
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
+static void ipa3_wq_rx_avail(struct work_struct *work);
+static void ipa3_alloc_wlan_rx_common_cache(u32 size);
+static void ipa3_cleanup_wlan_rx_common_cache(void);
+static void ipa3_wq_repl_rx(struct work_struct *work);
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys);
+static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep);
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep);
+static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
+	u32 ring_size, gfp_t mem_flag);
+static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
+	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag);
+static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl);
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify);
+static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify, int expected_num,
+	int *actual_num);
+static unsigned long tag_to_pointer_wa(uint64_t tag);
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+
+static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
+				struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	int i, cnt;
+
+	if (unlikely(tx_pkt == NULL)) {
+		IPAERR("tx_pkt is NULL\n");
+		return;
+	}
+
+	cnt = tx_pkt->cnt;
+	IPADBG_LOW("cnt: %d\n", cnt);
+	for (i = 0; i < cnt; i++) {
+		spin_lock_bh(&sys->spinlock);
+		if (unlikely(list_empty(&sys->head_desc_list))) {
+			spin_unlock_bh(&sys->spinlock);
+			return;
+		}
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		sys->len--;
+		spin_unlock_bh(&sys->spinlock);
+		if (!tx_pkt->no_unmap_dma) {
+			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
+		}
+		if (tx_pkt->callback)
+			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+}
+
+static void ipa3_wq_write_done_status(int src_pipe,
+			struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_sys_context *sys;
+
+	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);
+
+	if (!ipa3_ctx->ep[src_pipe].status.status_en)
+		return;
+
+	sys = ipa3_ctx->ep[src_pipe].sys;
+	if (!sys)
+		return;
+
+	ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+/**
+ * ipa3_tasklet_write_done() - this function will be (eventually) called
+ * when a Tx operation is complete
+ * @data: user pointer point to the ipa3_sys_context
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ *   the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ *   pipe context (not needed anymore)
+ */
+static void ipa3_tasklet_write_done(unsigned long data)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_tx_pkt_wrapper *this_pkt;
+	bool xmit_done = false;
+
+	sys = (struct ipa3_sys_context *)data;
+	spin_lock_bh(&sys->spinlock);
+	while (atomic_add_unless(&sys->xmit_eot_cnt, -1, 0)) {
+		while (!list_empty(&sys->head_desc_list)) {
+			this_pkt = list_first_entry(&sys->head_desc_list,
+				struct ipa3_tx_pkt_wrapper, link);
+			xmit_done = this_pkt->xmit_done;
+			spin_unlock_bh(&sys->spinlock);
+			ipa3_wq_write_done_common(sys, this_pkt);
+			spin_lock_bh(&sys->spinlock);
+			if (xmit_done)
+				break;
+		}
+	}
+	spin_unlock_bh(&sys->spinlock);
+}
+
+
+static void ipa3_send_nop_desc(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys = container_of(work,
+		struct ipa3_sys_context, work);
+	struct gsi_xfer_elem nop_xfer;
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+
+	if (atomic_read(&sys->workqueue_flushed))
+		return;
+
+	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
+	if (!tx_pkt) {
+		queue_work(sys->wq, &sys->work);
+		return;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	tx_pkt->cnt = 1;
+	tx_pkt->no_unmap_dma = true;
+	tx_pkt->sys = sys;
+	spin_lock_bh(&sys->spinlock);
+	if (unlikely(!sys->nop_pending)) {
+		spin_unlock_bh(&sys->spinlock);
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		return;
+	}
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	sys->nop_pending = false;
+
+	memset(&nop_xfer, 0, sizeof(nop_xfer));
+	nop_xfer.type = GSI_XFER_ELEM_NOP;
+	nop_xfer.flags = GSI_XFER_FLAG_EOT;
+	nop_xfer.xfer_user_data = tx_pkt;
+	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
+		spin_unlock_bh(&sys->spinlock);
+		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
+			sys->ep->gsi_chan_hdl);
+		queue_work(sys->wq, &sys->work);
+		return;
+	}
+	spin_unlock_bh(&sys->spinlock);
+
+	/* make sure TAG process is sent before clocks are gated */
+	ipa3_ctx->tag_process_before_gating = true;
+
+}
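+
+/*
+ * Sketch of the NOP flow implemented above and in ipa3_send(): when the last
+ * TX element is queued without EOT (pipes using the common event ring,
+ * between every IPA_EOT_THRESH-th packet), ipa3_send() arms sys->db_timer
+ * instead. The hrtimer handler (not part of this file excerpt) is assumed to
+ * queue sys->work, which runs ipa3_send_nop_desc() to push a NOP element
+ * with EOT so the pending completions are eventually delivered.
+ */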
+
+
+/**
+ * ipa3_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * This function is used for GPI connection.
+ * - ipa3_tx_pkt_wrapper will be used for each ipa
+ *   descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ *   contain information which will be later used by the user callbacks
+ * - Each packet (command or data) that will be sent will also be saved in
+ *   ipa3_sys_context for later check that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first = NULL;
+	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	struct gsi_xfer_elem gsi_xfer[IPA_SEND_MAX_DESC];
+	int i = 0;
+	int j;
+	int result;
+	u32 mem_flag = GFP_ATOMIC;
+	const struct ipa_gsi_ep_config *gsi_ep_cfg;
+	bool send_nop = false;
+	unsigned int max_desc;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
+	if (unlikely(!gsi_ep_cfg)) {
+		IPAERR("failed to get gsi EP config for client=%d\n",
+			sys->ep->client);
+		return -EFAULT;
+	}
+	if (unlikely(num_desc > IPA_SEND_MAX_DESC)) {
+		IPAERR("max descriptors reached need=%d max=%d\n",
+			num_desc, IPA_SEND_MAX_DESC);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	max_desc = gsi_ep_cfg->ipa_if_tlv;
+	if (gsi_ep_cfg->prefetch_mode == GSI_SMART_PRE_FETCH ||
+		gsi_ep_cfg->prefetch_mode == GSI_FREE_PRE_FETCH)
+		max_desc -= gsi_ep_cfg->prefetch_threshold;
+
+	if (unlikely(num_desc > max_desc)) {
+		IPAERR("Too many chained descriptors need=%d max=%d\n",
+			num_desc, max_desc);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	/* initialize only the xfers we use */
+	memset(gsi_xfer, 0, sizeof(gsi_xfer[0]) * num_desc);
+
+	spin_lock_bh(&sys->spinlock);
+
+	for (i = 0; i < num_desc; i++) {
+		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
+					   GFP_ATOMIC);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			result = -ENOMEM;
+			goto failure;
+		}
+		INIT_LIST_HEAD(&tx_pkt->link);
+
+		if (i == 0) {
+			tx_pkt_first = tx_pkt;
+			tx_pkt->cnt = num_desc;
+		}
+
+		/* populate tag field */
+		if (desc[i].is_tag_status) {
+			if (ipa_populate_tag_field(&desc[i], tx_pkt,
+				&tag_pyld_ret)) {
+				IPAERR("Failed to populate tag field\n");
+				result = -EFAULT;
+				goto failure_dma_map;
+			}
+		}
+
+		tx_pkt->type = desc[i].type;
+
+		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+			tx_pkt->mem.base = desc[i].pyld;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					dma_map_single(ipa3_ctx->pdev,
+					tx_pkt->mem.base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				tx_pkt->mem.phys_base =
+					desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		} else {
+			tx_pkt->mem.base = desc[i].frag;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					skb_frag_dma_map(ipa3_ctx->pdev,
+					desc[i].frag,
+					0, tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				tx_pkt->mem.phys_base =
+					desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		}
+		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+			IPAERR("failed to do dma map.\n");
+			result = -EFAULT;
+			goto failure_dma_map;
+		}
+
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+		tx_pkt->xmit_done = false;
+
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+		gsi_xfer[i].addr = tx_pkt->mem.phys_base;
+
+		/*
+		 * Special treatment for immediate commands, where
+		 * the structure of the descriptor is different
+		 */
+		if (desc[i].type == IPA_IMM_CMD_DESC) {
+			gsi_xfer[i].len = desc[i].opcode;
+			gsi_xfer[i].type =
+				GSI_XFER_ELEM_IMME_CMD;
+		} else {
+			gsi_xfer[i].len = desc[i].len;
+			gsi_xfer[i].type =
+				GSI_XFER_ELEM_DATA;
+		}
+
+		if (i == (num_desc - 1)) {
+			if (!sys->use_comm_evt_ring ||
+			    (sys->pkt_sent % IPA_EOT_THRESH == 0)) {
+				gsi_xfer[i].flags |=
+					GSI_XFER_FLAG_EOT;
+				gsi_xfer[i].flags |=
+					GSI_XFER_FLAG_BEI;
+			} else {
+				send_nop = true;
+			}
+			gsi_xfer[i].xfer_user_data =
+				tx_pkt_first;
+		} else {
+			gsi_xfer[i].flags |=
+				GSI_XFER_FLAG_CHAIN;
+		}
+	}
+
+	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
+	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
+			gsi_xfer, true);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR_RL("GSI xfer failed.\n");
+		result = -EFAULT;
+		goto failure;
+	}
+
+	if (send_nop && !sys->nop_pending)
+		sys->nop_pending = true;
+	else
+		send_nop = false;
+
+	sys->pkt_sent++;
+	spin_unlock_bh(&sys->spinlock);
+
+	/* set the timer for sending the NOP descriptor */
+	if (send_nop) {
+
+		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
+
+		IPADBG_LOW("scheduling timer for ch %lu\n",
+			sys->ep->gsi_chan_hdl);
+		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
+	}
+
+	/* make sure TAG process is sent before clocks are gated */
+	ipa3_ctx->tag_process_before_gating = true;
+
+	return 0;
+
+failure_dma_map:
+	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
+failure:
+	ipahal_destroy_imm_cmd(tag_pyld_ret);
+	tx_pkt = tx_pkt_first;
+	for (j = 0; j < i; j++) {
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+
+		if (!tx_pkt->no_unmap_dma) {
+			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size, DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
+		}
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+	return result;
+}
+
+/**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate a tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer is done the transport driver will
+ *   notify the sender via the registered completion callback
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+	bool in_atomic)
+{
+	return ipa3_send(sys, 1, desc, in_atomic);
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack - callback function which will be called by
+ * the transport driver after an immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2:	not used
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
+{
+	struct ipa3_desc *desc = (struct ipa3_desc *)user1;
+
+	if (WARN(!desc, "desc is NULL"))
+		return;
+
+	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack_free - callback function which will be
+ * called by the transport driver after an immediate command is complete.
+ * This function will also free the completion object once it is done.
+ * @tag_comp: pointer to the completion object
+ * @ignored: parameter not used
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
+{
+	struct ipa3_tag_completion *comp = tag_comp;
+
+	if (!comp) {
+		IPAERR("comp is NULL\n");
+		return;
+	}
+
+	complete(&comp->comp);
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+}
+
+/**
+ * ipa3_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ *
+ * Function will block till command gets ACK from IPA HW, caller needs
+ * to free any resources it allocated after function returns
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
+{
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+
+	sys = ipa3_ctx->ep[ep_idx].sys;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack;
+		descr->user1 = descr;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack;
+		desc->user1 = desc;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
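+
+/*
+ * Illustrative caller (schematic; real callers build the immediate command
+ * payload via ipahal, which is outside this excerpt):
+ *
+ *	struct ipa3_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	...fill opcode/pyld from an ipahal-constructed command...
+ *	if (ipa3_send_cmd(1, &desc))
+ *		IPAERR("imm cmd failed\n");
+ */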
+
+/**
+ * ipa3_send_cmd_timeout - send immediate commands with limited time
+ *	waiting for ACK from IPA HW
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ * @timeout:	millisecond to wait till get ACK from IPA HW
+ *
+ * Function will block till command gets ACK from IPA HW or timeout.
+ * Caller needs to free any resources it allocated after function returns
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
+{
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+	int completed;
+	struct ipa3_tag_completion *comp;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+
+	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
+	if (!comp)
+		return -ENOMEM;
+
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and in ack callback */
+	atomic_set(&comp->cnt, 2);
+
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	if (num_desc == 1) {
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack_free;
+		descr->user1 = comp;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			kfree(comp);
+			result = -EFAULT;
+			goto bail;
+		}
+	} else {
+		desc = &descr[num_desc - 1];
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack_free;
+		desc->user1 = comp;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			kfree(comp);
+			result = -EFAULT;
+			goto bail;
+		}
+	}
+
+	completed = wait_for_completion_timeout(
+		&comp->comp, msecs_to_jiffies(timeout));
+	if (!completed) {
+		IPADBG("timeout waiting for imm-cmd ACK\n");
+		result = -EBUSY;
+	}
+
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+bail:
+	return result;
+}
+
+/**
+ * ipa3_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packets skb, make it non DMAable
+ *  - Free the packet from the cache
+ *  - Prepare a proper skb
+ *  - Call the endpoints notify function, passing the skb in the parameters
+ *  - Replenish the rx cache
+ */
+static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	int ret;
+	int cnt = 0;
+	struct gsi_chan_xfer_notify notify = { 0 };
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+		!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_gsi_pkt(sys, &notify);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa3_dma_memcpy_notify(sys);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa3_wlan_wq_rx_common(sys, &notify);
+		else
+			ipa3_wq_rx_common(sys, &notify);
+
+		++cnt;
+	}
+	return cnt;
+}
+
+/**
+ * __ipa3_update_curr_poll_state() - update the peer pipe's polling state for
+ * the default WAN and coalescing pipes.
+ *
+ * When RSC/RSB is enabled both pipes use a common event ring, so their
+ * polling states must be kept in sync.
+ */
+void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
+{
+	int ep_idx = IPA_EP_NOT_ALLOCATED;
+
+	if (client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	if (client == IPA_CLIENT_APPS_WAN_CONS)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
+	if (ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[ep_idx].sys)
+		atomic_set(&ipa3_ctx->ep[ep_idx].sys->curr_polling_state,
+									state);
+}
+
+/**
+ * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+	int ret;
+
+	atomic_set(&sys->curr_polling_state, 0);
+	__ipa3_update_curr_poll_state(sys->ep->client, 0);
+
+	ipa3_dec_release_wakelock();
+	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+		GSI_CHAN_MODE_CALLBACK);
+	if ((ret != GSI_STATUS_SUCCESS) &&
+		!atomic_read(&sys->curr_polling_state)) {
+		if (ret == -GSI_STATUS_PENDING_IRQ) {
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			__ipa3_update_curr_poll_state(sys->ep->client, 1);
+		} else {
+			IPAERR("Failed to switch to intr mode %d ch_id %d\n",
+			 sys->curr_polling_state, sys->ep->gsi_chan_hdl);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * ipa3_handle_rx() - handle packet reception. This function is executed in
+ * the context of a work queue.
+ * @sys: system pipe context
+ *
+ * ipa3_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa3_handle_rx(struct ipa3_sys_context *sys)
+{
+	int inactive_cycles;
+	int cnt;
+	int ret;
+
+	ipa_pm_activate_sync(sys->pm_hdl);
+start_poll:
+	inactive_cycles = 0;
+	do {
+		cnt = ipa3_handle_rx_core(sys, true, true);
+		if (cnt == 0)
+			inactive_cycles++;
+		else
+			inactive_cycles = 0;
+
+		trace_idle_sleep_enter3(sys->ep->client);
+		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
+		trace_idle_sleep_exit3(sys->ep->client);
+
+		/*
+		 * if pipe is out of buffers there is no point polling for
+		 * completed descs; release the worker so delayed work can
+		 * run in a timely manner
+		 */
+		if (sys->len == 0)
+			break;
+
+	} while (inactive_cycles <= POLLING_INACTIVITY_RX);
+
+	trace_poll_to_intr3(sys->ep->client);
+	ret = ipa3_rx_switch_to_intr_mode(sys);
+	if (ret == -GSI_STATUS_PENDING_IRQ)
+		goto start_poll;
+
+	ipa_pm_deferred_deactivate(sys->pm_hdl);
+}
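+
+/*
+ * Polling <-> interrupt flow, in short:
+ *
+ *	GSI IRQ -> ipa3_handle_rx() polls in a loop, sleeping
+ *	POLLING_MIN_SLEEP_RX..POLLING_MAX_SLEEP_RX us between passes;
+ *	after POLLING_INACTIVITY_RX consecutive empty passes it switches
+ *	the channel back to callback (interrupt) mode. If GSI reports
+ *	-GSI_STATUS_PENDING_IRQ during the switch, polling resumes so
+ *	that no completion is lost.
+ */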
+
+static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+
+	if (sys->napi_obj) {
+		/* interrupt mode is done in ipa3_rx_poll context */
+		ipa_assert();
+	} else
+		ipa3_handle_rx(sys);
+}
+
+enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
+{
+	struct ipa3_sys_context *sys = container_of(param,
+		struct ipa3_sys_context, db_timer);
+
+	queue_work(sys->wq, &sys->work);
+	return HRTIMER_NORESTART;
+}
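+
+/*
+ * The doorbell timer defers ringing the GSI doorbell: when it fires it
+ * queues the pipe's work item rather than touching HW from hrtimer
+ * (interrupt) context. An illustrative arming sketch (the delay value
+ * is an assumption):
+ *
+ *	hrtimer_start(&sys->db_timer, ns_to_ktime(10 * NSEC_PER_USEC),
+ *		HRTIMER_MODE_REL);
+ */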
+
+static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)p;
+
+	switch (event) {
+	case IPA_PM_CLIENT_ACTIVATED:
+		/*
+		 * this event is ignored as the sync version of activation
+		 * will be used.
+		 */
+		break;
+	case IPA_PM_REQUEST_WAKEUP:
+		/*
+		 * pipe will be unsuspended as part of
+		 * enabling IPA clocks
+		 */
+		IPADBG("calling wakeup for client %d\n", sys->ep->client);
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_WAN");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_WAN");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LAN");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LAN");
+		} else if (sys->ep->client == IPA_CLIENT_ODL_DPL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_ODL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_COAL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
+		} else
+			IPAERR("Unexpected event %d for client %d\n",
+				event, sys->ep->client);
+		break;
+	default:
+		IPAERR("Unexpected event %d\n for client %d\n",
+			event, sys->ep->client);
+		WARN_ON(1);
+		return;
+	}
+}
+
+/**
+ * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup the pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - Configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - Create a GPI connection with IPA.
+ *  - Allocate a descriptor FIFO.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int i, ipa_ep_idx, wan_handle, coal_ep_id;
+	int result = -EINVAL;
+	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
+	struct ipahal_reg_coal_evict_lru evict_lru;
+	char buff[IPA_RESOURCE_NAME_MAX];
+	struct ipa_ep_cfg ep_cfg_copy;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm client:%d fifo_sz:%d\n",
+			sys_in->client, sys_in->desc_fifo_sz);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("Invalid client.\n");
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid == 1) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		goto fail_gen;
+	}
+
+	coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	/* save the input config parameters */
+	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		ep_cfg_copy = sys_in->ipa_ep_cfg;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	if (!ep->sys) {
+		struct ipa_pm_register_params pm_reg;
+
+		memset(&pm_reg, 0, sizeof(pm_reg));
+		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
+		if (!ep->sys) {
+			IPAERR("failed to sys ctx for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_and_disable_clocks;
+		}
+
+		ep->sys->ep = ep;
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+				sys_in->client);
+		ep->sys->wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+
+		if (!ep->sys->wq) {
+			IPAERR("failed to create wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq;
+		}
+
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+				sys_in->client);
+		ep->sys->repl_wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+		if (!ep->sys->repl_wq) {
+			IPAERR("failed to create rep wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq2;
+		}
+
+		INIT_LIST_HEAD(&ep->sys->head_desc_list);
+		INIT_LIST_HEAD(&ep->sys->rcycl_list);
+		spin_lock_init(&ep->sys->spinlock);
+		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
+
+		/* create IPA PM resources for handling polling mode */
+		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS &&
+			coal_ep_id != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[coal_ep_id].valid == 1) {
+			/* Use coalescing pipe PM handle for default pipe also */
+			ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
+		} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			pm_reg.name = ipa_clients_strings[sys_in->client];
+			pm_reg.callback = ipa_pm_sys_pipe_cb;
+			pm_reg.user_data = ep->sys;
+			pm_reg.group = IPA_PM_GROUP_APPS;
+			result = ipa_pm_register(&pm_reg, &ep->sys->pm_hdl);
+			if (result) {
+				IPAERR("failed to create IPA PM client %d\n",
+					result);
+				goto fail_pm;
+			}
+
+			if (IPA_CLIENT_IS_APPS_CONS(sys_in->client)) {
+				result = ipa_pm_associate_ipa_cons_to_client(
+					ep->sys->pm_hdl, sys_in->client);
+				if (result) {
+					IPAERR("failed to associate\n");
+					goto fail_gen2;
+				}
+			}
+
+			result = ipa_pm_set_throughput(ep->sys->pm_hdl,
+				IPA_APPS_BW_FOR_PM);
+			if (result) {
+				IPAERR("failed to set profile IPA PM client\n");
+				goto fail_gen2;
+			}
+		}
+	} else {
+		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
+	}
+
+	atomic_set(&ep->sys->xmit_eot_cnt, 0);
+	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
+			(unsigned long) ep->sys);
+	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+	if (ipa3_assign_policy(sys_in, ep->sys)) {
+		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
+		result = -ENOMEM;
+		goto fail_gen2;
+	}
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->sys->napi_obj = sys_in->napi_obj;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+	atomic_set(&ep->avail_fifo_desc,
+		((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));
+
+	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+	    ep->sys->status_stat == NULL) {
+		ep->sys->status_stat =
+			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
+		if (!ep->sys->status_stat)
+			goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep %d configuration successful\n", ipa_ep_idx);
+	} else {
+		IPADBG("skipping ep %d configuration\n", ipa_ep_idx);
+	}
+
+	result = ipa_gsi_setup_channel(sys_in, ep);
+	if (result) {
+		IPAERR("Failed to setup GSI channel\n");
+		goto fail_gen2;
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
+		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
+		if (!ep->sys->repl) {
+			IPAERR("failed to alloc repl for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_gen2;
+		}
+		atomic_set(&ep->sys->repl->pending, 0);
+		ep->sys->repl->capacity = ep->sys->rx_pool_sz + 1;
+
+		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
+				sizeof(void *), GFP_KERNEL);
+		if (!ep->sys->repl->cache) {
+			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
+			ep->sys->repl->capacity = 0;
+		} else {
+			atomic_set(&ep->sys->repl->head_idx, 0);
+			atomic_set(&ep->sys->repl->tail_idx, 0);
+			ipa3_wq_repl_rx(&ep->sys->repl_work);
+		}
+	}
+
+	if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
+		ep->sys->page_recycle_repl = kzalloc(
+			sizeof(*ep->sys->page_recycle_repl), GFP_KERNEL);
+		if (!ep->sys->page_recycle_repl) {
+			IPAERR("failed to alloc repl for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_gen2;
+		}
+		atomic_set(&ep->sys->page_recycle_repl->pending, 0);
+		ep->sys->page_recycle_repl->capacity =
+				(ep->sys->rx_pool_sz + 1) * 2;
+
+		ep->sys->page_recycle_repl->cache =
+				kcalloc(ep->sys->page_recycle_repl->capacity,
+				sizeof(void *), GFP_KERNEL);
+		atomic_set(&ep->sys->page_recycle_repl->head_idx, 0);
+		atomic_set(&ep->sys->page_recycle_repl->tail_idx, 0);
+		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
+		if (!ep->sys->repl) {
+			IPAERR("failed to alloc repl for client %d\n",
+				   sys_in->client);
+			result = -ENOMEM;
+			goto fail_page_recycle_repl;
+		}
+		ep->sys->repl->capacity = (ep->sys->rx_pool_sz + 1);
+
+		atomic_set(&ep->sys->repl->pending, 0);
+		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
+				sizeof(void *), GFP_KERNEL);
+		atomic_set(&ep->sys->repl->head_idx, 0);
+		atomic_set(&ep->sys->repl->tail_idx, 0);
+
+		ipa3_replenish_rx_page_cache(ep->sys);
+		ipa3_wq_page_repl(&ep->sys->repl_work);
+	}
+
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		if (IPA_CLIENT_IS_WAN_CONS(sys_in->client) &&
+			ipa3_ctx->ipa_wan_skb_page) {
+			ipa3_replenish_rx_page_recycle(ep->sys);
+		} else
+			ipa3_replenish_rx_cache(ep->sys);
+		for (i = 0; i < GSI_VEID_MAX; i++)
+			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
+	}
+
+	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
+	}
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_install_dflt_flt_rules(ipa_ep_idx);
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d ep=%d.\n", result,
+			ipa_ep_idx);
+		goto fail_repl;
+	}
+
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result,
+			ipa_ep_idx);
+		goto fail_gen3;
+	}
+
+	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	/* configure the registers and setup the default pipe */
+	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		evict_lru.coal_vp_lru_thrshld = 0;
+		evict_lru.coal_eviction_en = true;
+		ipahal_write_reg_fields(IPA_COAL_EVICT_LRU, &evict_lru);
+
+		qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
+		ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
+
+		sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
+		sys_in->ipa_ep_cfg = ep_cfg_copy;
+		result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+		if (result) {
+			IPAERR("failed to setup default coalescing pipe\n");
+			goto fail_repl;
+		}
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ep->client);
+
+	return 0;
+
+fail_gen3:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_repl:
+	ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
+	ep->sys->repl->capacity = 0;
+	kfree(ep->sys->repl);
+fail_page_recycle_repl:
+	if (ep->sys->page_recycle_repl) {
+		ep->sys->page_recycle_repl->capacity = 0;
+		kfree(ep->sys->page_recycle_repl);
+	}
+fail_gen2:
+	ipa_pm_deregister(ep->sys->pm_hdl);
+fail_pm:
+	destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+	destroy_workqueue(ep->sys->wq);
+fail_wq:
+	kfree(ep->sys);
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
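+
+/*
+ * Illustrative usage sketch (not part of this driver; the callback name,
+ * context pointer and FIFO size below are assumptions):
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+ *	sys_in.desc_fifo_sz = 0x800;
+ *	sys_in.notify = my_lan_rx_notify;
+ *	sys_in.priv = my_ctx;
+ *	if (ipa3_setup_sys_pipe(&sys_in, &hdl))
+ *		return -EFAULT;
+ *	...
+ *	ipa3_teardown_sys_pipe(hdl);
+ */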
+
+/**
+ * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_teardown_sys_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int empty;
+	int result;
+	int i;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		do {
+			spin_lock_bh(&ep->sys->spinlock);
+			empty = list_empty(&ep->sys->head_desc_list);
+			spin_unlock_bh(&ep->sys->spinlock);
+			if (!empty)
+				usleep_range(95, 105);
+			else
+				break;
+		} while (1);
+	}
+
+	/* channel stop might fail on timeout if IPA is busy */
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result == GSI_STATUS_SUCCESS)
+			break;
+
+		if (result != -GSI_STATUS_AGAIN &&
+			result != -GSI_STATUS_TIMED_OUT)
+			break;
+	}
+
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("GSI stop chan err: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+
+	if (ep->sys->napi_obj) {
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+	flush_workqueue(ep->sys->wq);
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		atomic_set(&ep->sys->workqueue_flushed, 1);
+
+	/* tear down the default pipe before we reset the channel */
+	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+		if (i == IPA_EP_NOT_ALLOCATED) {
+			IPAERR("failed to get idx");
+			return i;
+		}
+
+		result = ipa3_teardown_coal_def_pipe(i);
+		if (result) {
+			IPAERR("failed to teardown default coal pipe\n");
+			return result;
+		}
+	}
+
+	result = ipa3_reset_gsi_channel(clnt_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to reset chan: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+	dma_free_coherent(ipa3_ctx->pdev,
+		ep->gsi_mem_info.chan_ring_len,
+		ep->gsi_mem_info.chan_ring_base_vaddr,
+		ep->gsi_mem_info.chan_ring_base_addr);
+	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to dealloc chan: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+
+	/* free event ring only when it is present */
+	if (ep->sys->use_comm_evt_ring) {
+		ipa3_ctx->gsi_evt_comm_ring_rem +=
+			ep->gsi_mem_info.chan_ring_len;
+	} else if (ep->gsi_evt_ring_hdl != ~0) {
+		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+		if (WARN(result != GSI_STATUS_SUCCESS, "reset evt %d", result))
+			return result;
+
+		dma_free_coherent(ipa3_ctx->pdev,
+			ep->gsi_mem_info.evt_ring_len,
+			ep->gsi_mem_info.evt_ring_base_vaddr,
+			ep->gsi_mem_info.evt_ring_base_addr);
+		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+		if (WARN(result != GSI_STATUS_SUCCESS, "deall evt %d", result))
+			return result;
+	}
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa3_cleanup_rx(ep->sys);
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			ep->client == IPA_CLIENT_APPS_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_delete_dflt_flt_rules(clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);
+
+	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));
+
+	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
+		ipa3_cleanup_wlan_rx_common_cache();
+
+	ep->valid = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
+ *				   default GPI pipe and cleanup the IPA EP.
+ *				   Called after the coalesced pipe is
+ *				   destroyed.
+ * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_teardown_coal_def_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result;
+	int i;
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	ipa3_disable_data_path(clnt_hdl);
+
+	/* channel stop might fail on timeout if IPA is busy */
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result == GSI_STATUS_SUCCESS)
+			break;
+
+		if (result != -GSI_STATUS_AGAIN &&
+		    result != -GSI_STATUS_TIMED_OUT)
+			break;
+	}
+
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("GSI stop chan err: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+	result = ipa3_reset_gsi_channel(clnt_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to reset chan: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+	dma_free_coherent(ipa3_ctx->pdev,
+		ep->gsi_mem_info.chan_ring_len,
+		ep->gsi_mem_info.chan_ring_base_vaddr,
+		ep->gsi_mem_info.chan_ring_base_addr);
+	result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to dealloc chan: %d.\n", result);
+		ipa_assert();
+		return result;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+
+	flush_workqueue(ep->sys->wq);
+
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa3_cleanup_rx(ep->sys);
+
+	ep->valid = 0;
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_tx_comp_usr_notify_release() - Callback which calls the user-supplied
+ * callback to release the skb, or releases it on its own if no callback was
+ * supplied.
+ * @user1: the skb to be released
+ * @user2: the endpoint index
+ *
+ * This notify callback is for the destination client.
+ */
+static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("skb=%pK ep=%d\n", skb, ep_idx);
+
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify)
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+void ipa3_tx_cmd_comp(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+/**
+ * ipa3_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @meta:	[in] TX packet meta-data
+ *
+ * Data-path tx handler. This is used both for the SW data path, which
+ * bypasses most IPA HW blocks, AND for the regular HW data path (WLAN AMPDU
+ * traffic only). If dst is a "valid" CONS type, the SW data path is used; if
+ * dst is the WLAN_AMPDU PROD type, the HW data path for WLAN AMPDU is used.
+ * Anything else is an error. On error, the client needs to free the skb as
+ * needed. On success, the IPA driver will later invoke the client callback
+ * if one was supplied; that callback should free the skb. If no callback was
+ * supplied, the IPA driver frees the skb internally.
+ *
+ * The function uses two descriptors for this send command (for
+ * A5_WLAN_AMPDU_PROD only one descriptor is sent); the first descriptor
+ * informs the IPA hardware that apps need to push data into the IPA
+ * (IP_PACKET_INIT immediate command). Once the send is done from the
+ * transport's point of view, the IPA driver is notified via the supplied
+ * callback.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa3_desc *desc;
+	struct ipa3_desc _desc[3];
+	int dst_ep_idx;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipa3_sys_context *sys;
+	int src_ep_idx;
+	int num_frags, f;
+	const struct ipa_gsi_ep_config *gsi_ep;
+	int data_idx;
+	unsigned int max_desc;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA3 driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (skb->len == 0) {
+		IPAERR("packet size is 0\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * USB_CONS: PKT_INIT ep_idx = dst pipe
+	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
+	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+	 *
+	 * LAN TX: all PKT_INIT
+	 * WAN TX: PKT_INIT (cmd) + HW (data)
+	 *
+	 */
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_APPS_LAN_PROD);
+			goto fail_gen;
+		}
+		dst_ep_idx = ipa3_get_ep_mapping(dst);
+	} else {
+		src_ep_idx = ipa3_get_ep_mapping(dst);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n", dst);
+			goto fail_gen;
+		}
+		if (meta && meta->pkt_init_dst_ep_valid)
+			dst_ep_idx = meta->pkt_init_dst_ep;
+		else
+			dst_ep_idx = -1;
+	}
+
+	sys = ipa3_ctx->ep[src_ep_idx].sys;
+
+	if (!sys || !sys->ep->valid) {
+		IPAERR_RL("pipe %d not valid\n", src_ep_idx);
+		goto fail_pipe_not_valid;
+	}
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	/*
+	 * Make sure the TLV FIFO supports the needed frags:
+	 * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS,
+	 * plus 1 descriptor for the linear portion of the skb.
+	 */
+	gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
+	if (unlikely(gsi_ep == NULL)) {
+		IPAERR("failed to get EP %d GSI info\n", src_ep_idx);
+		goto fail_gen;
+	}
+	max_desc =  gsi_ep->ipa_if_tlv;
+	if (gsi_ep->prefetch_mode == GSI_SMART_PRE_FETCH ||
+		gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
+		max_desc -= gsi_ep->prefetch_threshold;
+	if (num_frags + 3 > max_desc) {
+		if (skb_linearize(skb)) {
+			IPAERR("Failed to linear skb with %d frags\n",
+				num_frags);
+			goto fail_gen;
+		}
+		num_frags = 0;
+	}
+	if (num_frags) {
+		/* 1 desc for tag to resolve status out-of-order issue;
+		 * 1 desc is needed for the linear portion of skb;
+		 * 1 desc may be needed for the PACKET_INIT;
+		 * 1 desc for each frag
+		 */
+		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
+		if (!desc) {
+			IPAERR("failed to alloc desc array\n");
+			goto fail_gen;
+		}
+	} else {
+		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
+		desc = &_desc[0];
+	}
+
+	if (dst_ep_idx != -1) {
+		int skb_idx;
+
+		/* SW data path */
+		data_idx = 0;
+		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+			/*
+			 * For a non-interrupt mode channel (where there is no
+			 * event ring), a TAG STATUS command is used for
+			 * completion notification. IPA will generate a status
+			 * packet with tag info as a result of the TAG STATUS
+			 * command.
+			 */
+			desc[data_idx].is_tag_status = true;
+			data_idx++;
+		}
+		desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
+		desc[data_idx].dma_address_valid = true;
+		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
+		desc[data_idx].type = IPA_IMM_CMD_DESC;
+		desc[data_idx].callback = NULL;
+		data_idx++;
+		desc[data_idx].pyld = skb->data;
+		desc[data_idx].len = skb_headlen(skb);
+		desc[data_idx].type = IPA_DATA_DESC_SKB;
+		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
+		desc[data_idx].user1 = skb;
+		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+				meta->pkt_init_dst_ep_remote) ?
+				src_ep_idx :
+				dst_ep_idx;
+		if (meta && meta->dma_address_valid) {
+			desc[data_idx].dma_address_valid = true;
+			desc[data_idx].dma_address = meta->dma_address;
+		}
+
+		skb_idx = data_idx;
+		data_idx++;
+
+		for (f = 0; f < num_frags; f++) {
+			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
+			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[data_idx + f].len =
+				skb_frag_size(desc[data_idx + f].frag);
+		}
+		/* don't free skb till frag mappings are released */
+		if (num_frags) {
+			desc[data_idx + f - 1].callback =
+				desc[skb_idx].callback;
+			desc[data_idx + f - 1].user1 = desc[skb_idx].user1;
+			desc[data_idx + f - 1].user2 = desc[skb_idx].user2;
+			desc[skb_idx].callback = NULL;
+		}
+
+		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
+			IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
+				skb, num_frags);
+			goto fail_send;
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
+	} else {
+		/* HW data path */
+		data_idx = 0;
+		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+			/*
+			 * For a non-interrupt mode channel (where there is no
+			 * event ring), a TAG STATUS command is used for
+			 * completion notification. IPA will generate a status
+			 * packet with tag info as a result of the TAG STATUS
+			 * command.
+			 */
+			desc[data_idx].is_tag_status = true;
+			data_idx++;
+		}
+		desc[data_idx].pyld = skb->data;
+		desc[data_idx].len = skb_headlen(skb);
+		desc[data_idx].type = IPA_DATA_DESC_SKB;
+		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
+		desc[data_idx].user1 = skb;
+		desc[data_idx].user2 = src_ep_idx;
+
+		if (meta && meta->dma_address_valid) {
+			desc[data_idx].dma_address_valid = true;
+			desc[data_idx].dma_address = meta->dma_address;
+		}
+		if (num_frags == 0) {
+			if (ipa3_send(sys, data_idx + 1, desc, true)) {
+				IPAERR("fail to send skb %pK HWP\n", skb);
+				goto fail_mem;
+			}
+		} else {
+			for (f = 0; f < num_frags; f++) {
+				desc[data_idx+f+1].frag =
+					&skb_shinfo(skb)->frags[f];
+				desc[data_idx+f+1].type =
+					IPA_DATA_DESC_SKB_PAGED;
+				desc[data_idx+f+1].len =
+					skb_frag_size(desc[data_idx+f+1].frag);
+			}
+			/* don't free skb till frag mappings are released */
+			desc[data_idx+f].callback = desc[data_idx].callback;
+			desc[data_idx+f].user1 = desc[data_idx].user1;
+			desc[data_idx+f].user2 = desc[data_idx].user2;
+			desc[data_idx].callback = NULL;
+
+			if (ipa3_send(sys, num_frags + data_idx + 1,
+				desc, true)) {
+				IPAERR("fail to send skb %pK num_frags %u\n",
+					skb, num_frags);
+				goto fail_mem;
+			}
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
+	}
+
+	if (num_frags) {
+		kfree(desc);
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
+	}
+	return 0;
+
+fail_send:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+fail_mem:
+	if (num_frags)
+		kfree(desc);
+fail_gen:
+	return -EFAULT;
+fail_pipe_not_valid:
+	return -EPIPE;
+}
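+
+/*
+ * Illustrative usage sketch (the destination client below is an
+ * assumption). On success the skb is owned by the driver and freed via
+ * the client notify callback (or internally if none was registered); on
+ * failure ownership stays with the caller:
+ *
+ *	if (ipa3_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL)) {
+ *		dev_kfree_skb_any(skb);
+ *		return;
+ *	}
+ */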
+
+static void ipa3_wq_handle_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, work);
+
+	if (sys->napi_obj) {
+		ipa_pm_activate_sync(sys->pm_hdl);
+		napi_schedule(sys->napi_obj);
+	} else
+		ipa3_handle_rx(sys);
+}
+
+static void ipa3_wq_repl_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	gfp_t flag = GFP_KERNEL;
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa3_sys_context, repl_work);
+	atomic_set(&sys->repl->pending, 0);
+	curr = atomic_read(&sys->repl->tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl->capacity;
+		if (next == atomic_read(&sys->repl->head_idx))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt)
+			goto fail_kmem_cache_alloc;
+
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL)
+			goto fail_skb_alloc;
+
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
+			       __func__, (void *)rx_pkt->data.dma_addr,
+			       ptr, sys);
+			goto fail_dma_mapping;
+		}
+
+		sys->repl->cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl->tail_idx, next);
+	}
+
+	return;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl->tail_idx) ==
+			atomic_read(&sys->repl->head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
+		pr_err_ratelimited("%s sys=%pK repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
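+
+/*
+ * The repl ring is a single-producer/single-consumer circular buffer:
+ * this worker advances tail_idx after publishing a slot, and the
+ * replenish path advances head_idx after consuming one. One slot is
+ * always kept empty so that head == tail unambiguously means "empty",
+ * which is why the ring is allocated with rx_pool_sz + 1 entries. The
+ * occupancy math, as a sketch:
+ *
+ *	used  = (tail - head + capacity) % capacity;
+ *	full  = ((tail + 1) % capacity == head);
+ *	empty = (tail == head);
+ */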
+
+static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
+	gfp_t flag, bool is_tmp_alloc)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	flag |= __GFP_NOMEMALLOC;
+	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+		flag);
+	if (unlikely(!rx_pkt))
+		return NULL;
+	rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
+	rx_pkt->page_data.page = __dev_alloc_pages(flag,
+		IPA_WAN_PAGE_ORDER);
+	if (unlikely(!rx_pkt->page_data.page))
+		goto fail_page_alloc;
+
+	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
+			rx_pkt->page_data.page, 0,
+			rx_pkt->len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev,
+		rx_pkt->page_data.dma_addr)) {
+		pr_err_ratelimited("%s dma map fail %pK for %pK\n",
+			__func__, (void *)rx_pkt->page_data.dma_addr,
+			rx_pkt->page_data.page);
+		goto fail_dma_mapping;
+	}
+	rx_pkt->page_data.is_tmp_alloc = is_tmp_alloc;
+	return rx_pkt;
+
+fail_dma_mapping:
+	__free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER);
+fail_page_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	return NULL;
+}
+
+static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	u32 curr;
+
+	for (curr = 0; curr < sys->page_recycle_repl->capacity; curr++) {
+		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
+		if (!rx_pkt) {
+			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
+			ipa_assert();
+			break;
+		}
+		rx_pkt->sys = sys;
+		sys->page_recycle_repl->cache[curr] = rx_pkt;
+	}
+}
+
+static void ipa3_wq_page_repl(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa3_sys_context, repl_work);
+	atomic_set(&sys->repl->pending, 0);
+	curr = atomic_read(&sys->repl->tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl->capacity;
+		if (unlikely(next == atomic_read(&sys->repl->head_idx)))
+			goto fail_kmem_cache_alloc;
+		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, true);
+		if (unlikely(!rx_pkt)) {
+			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
+			break;
+		}
+		rx_pkt->sys = sys;
+		sys->repl->cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl->tail_idx, next);
+	}
+
+	return;
+
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl->tail_idx) ==
+			atomic_read(&sys->repl->head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		pr_err_ratelimited("%s sys=%pK wq_repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
+
+static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
+{
+	int tail, head, avail;
+
+	if (atomic_read(&sys->repl->pending))
+		return;
+
+	tail = atomic_read(&sys->repl->tail_idx);
+	head = atomic_read(&sys->repl->head_idx);
+	avail = (tail - head) % sys->repl->capacity;
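+	/*
+	 * Note: tail and head are plain ints, so (tail - head) goes
+	 * negative once the producer wraps before the consumer; C's %
+	 * then yields a negative value, which is still below
+	 * capacity / 4, so the refill work is queued as intended.
+	 */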
+
+	if (avail < sys->repl->capacity / 4) {
+		atomic_set(&sys->repl->pending, 1);
+		queue_work(sys->repl_wq, &sys->repl_work);
+	}
+}
+
+static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	u32 curr;
+	u32 curr_wq;
+	int idx = 0;
+	struct page *cur_page;
+	u32 stats_i = 0;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+	stats_i = (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) ? 0 : 1;
+
+	spin_lock_bh(&sys->spinlock);
+	rx_len_cached = sys->len;
+	curr = atomic_read(&sys->page_recycle_repl->head_idx);
+	curr_wq = atomic_read(&sys->repl->head_idx);
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		cur_page = sys->page_recycle_repl->cache[curr]->page_data.page;
+		/* Found an idle page that can be used */
+		if (page_ref_count(cur_page) == 1) {
+			page_ref_inc(cur_page);
+			rx_pkt = sys->page_recycle_repl->cache[curr];
+			if (++curr == sys->page_recycle_repl->capacity)
+				curr = 0;
+		} else {
+			/*
+			 * Could not find idle page at curr index.
+			 * Allocate a new one.
+			 */
+			if (curr_wq == atomic_read(&sys->repl->tail_idx))
+				break;
+			ipa3_ctx->stats.page_recycle_stats[stats_i].tmp_alloc++;
+			rx_pkt = sys->repl->cache[curr_wq];
+			if (++curr_wq == sys->repl->capacity)
+				curr_wq = 0;
+		}
+
+		dma_sync_single_for_device(ipa3_ctx->pdev,
+			rx_pkt->page_data.dma_addr,
+			rx_pkt->len, DMA_FROM_DEVICE);
+		gsi_xfer_elem_array[idx].addr = rx_pkt->page_data.dma_addr;
+		gsi_xfer_elem_array[idx].len = rx_pkt->len;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		rx_len_cached++;
+		idx++;
+		ipa3_ctx->stats.page_recycle_stats[stats_i].total_replenished++;
+		/*
+		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				ipa_assert();
+				break;
+			}
+			idx = 0;
+		}
+	}
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+			gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		/* ensure write is done before setting head index */
+		mb();
+		atomic_set(&sys->repl->head_idx, curr_wq);
+		atomic_set(&sys->page_recycle_repl->head_idx, curr);
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		ipa_assert();
+	}
+	spin_unlock_bh(&sys->spinlock);
+	__trigger_repl_work(sys);
+
+	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else
+			WARN_ON(1);
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+	}
+}
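+
+/*
+ * Page-recycling design note: page_recycle_repl is a fixed pool of
+ * pre-mapped pages handed to HW and reused once the network stack drops
+ * its reference (page_ref_count() back to 1). When no recycled page is
+ * idle at the current index, a temporary page is taken from the
+ * worker-filled repl ring instead; page_recycle_stats tracks how often
+ * that fallback (tmp_alloc) hits.
+ */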
+
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
+	struct ipa3_rx_pkt_wrapper *tmp;
+	int ret;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	u32 rx_len_cached = 0;
+
+	IPADBG_LOW("\n");
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+	rx_len_cached = sys->len;
+
+	if (rx_len_cached < sys->rx_pool_sz) {
+		list_for_each_entry_safe(rx_pkt, tmp,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+			list_del(&rx_pkt->link);
+
+			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
+				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+
+			rx_pkt->len = 0;
+			rx_pkt->sys = sys;
+
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+				&gsi_xfer_elem_one, true);
+
+			if (ret) {
+				IPAERR("failed to provide buffer: %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+
+			rx_len_cached = ++sys->len;
+
+			if (rx_len_cached >= sys->rx_pool_sz) {
+				spin_unlock_bh(
+					&ipa3_ctx->wc_memb.wlan_spinlock);
+				return;
+			}
+		}
+	}
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	if (rx_len_cached < sys->rx_pool_sz &&
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt <
+			 IPA_WLAN_COMM_RX_POOL_HIGH) {
+		ipa3_replenish_rx_cache(sys);
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
+			(sys->rx_pool_sz - rx_len_cached);
+	}
+
+	return;
+
+fail_provide_rx_buffer:
+	list_del(&rx_pkt->link);
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
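+
+/*
+ * WLAN Rx buffers come from a common pool shared by all WLAN consumer
+ * pipes (wc_memb.wlan_comm_desc_list). Replenish first recycles
+ * descriptors from that list; only when the list is exhausted and the
+ * total count is still under IPA_WLAN_COMM_RX_POOL_HIGH does it fall
+ * back to the generic skb replenish path, growing the common pool.
+ */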
+
+static void ipa3_cleanup_wlan_rx_common_cache(void)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *tmp;
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	list_for_each_entry_safe(rx_pkt, tmp,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
+	}
+	ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+	if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
+		IPAERR("wlan comm buff free cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+
+	if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
+		IPAERR("wlan comm buff total cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+static void ipa3_alloc_wlan_rx_common_cache(u32 size)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int rx_len_cached = 0;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+	while (rx_len_cached < size) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt)
+			goto fail_kmem_cache_alloc;
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+
+		rx_pkt->data.skb =
+			ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+						flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			IPAERR("dma_map_single failure %pK for %pK\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+		list_add_tail(&rx_pkt->link,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+		rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+		spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	}
+
+	return;
+
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	return;
+}
+
+/**
+ * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache until there
+ * are rx_pool_sz buffers queued to the pipe. For each buffer:
+ *   - Allocate the buffer wrapper from the cache
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Make the packet DMAable
+ *   - Queue the packet to the GSI channel
+ */
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int idx = 0;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt)
+			goto fail_kmem_cache_alloc;
+
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			IPAERR("dma_map_single failure %pK for %pK\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
+		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		idx++;
+		rx_len_cached++;
+		/*
+		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				WARN_ON(1);
+				break;
+			}
+			idx = 0;
+		}
+	}
+	goto done;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+done:
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+		gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		WARN_ON(1);
+	}
+}
+
+static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int idx = 0;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (list_empty(&sys->rcycl_list)) {
+			rx_pkt = kmem_cache_zalloc(
+				ipa3_ctx->rx_pkt_wrapper_cache, flag);
+			if (!rx_pkt)
+				goto fail_kmem_cache_alloc;
+
+			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+			rx_pkt->sys = sys;
+
+			rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+			if (rx_pkt->data.skb == NULL) {
+				IPAERR("failed to alloc skb\n");
+				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+					rx_pkt);
+				goto fail_kmem_cache_alloc;
+			}
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
+				IPAERR("dma_map_single failure %pK for %pK\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		} else {
+			spin_lock_bh(&sys->spinlock);
+			rx_pkt = list_first_entry(&sys->rcycl_list,
+				struct ipa3_rx_pkt_wrapper, link);
+			list_del(&rx_pkt->link);
+			spin_unlock_bh(&sys->spinlock);
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
+				IPAERR("dma_map_single failure %pK for %pK\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		}
+
+		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
+		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		idx++;
+		rx_len_cached++;
+		/*
+		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				WARN_ON(1);
+				break;
+			}
+			idx = 0;
+		}
+	}
+	goto done;
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	/* re-initialize the link before queueing to the recycle list */
+	INIT_LIST_HEAD(&rx_pkt->link);
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+	spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+		msecs_to_jiffies(1));
+done:
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+		gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		WARN_ON(1);
+	}
+}
+
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	u32 curr;
+	int idx = 0;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+
+	spin_lock_bh(&sys->spinlock);
+	rx_len_cached = sys->len;
+	curr = atomic_read(&sys->repl->head_idx);
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (curr == atomic_read(&sys->repl->tail_idx))
+			break;
+		rx_pkt = sys->repl->cache[curr];
+		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
+		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		rx_len_cached++;
+		if (++curr == sys->repl->capacity)
+			curr = 0;
+		idx++;
+		/*
+		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				WARN_ON(1);
+				break;
+			}
+			idx = 0;
+		}
+	}
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+			gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		/* ensure write is done before setting head index */
+		mb();
+		atomic_set(&sys->repl->head_idx, curr);
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		WARN_ON(1);
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+
+	__trigger_repl_work(sys);
+
+	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
+			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else
+			WARN_ON(1);
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+	}
+}
+
+static void ipa3_replenish_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	sys->repl_hdlr(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * free_rx_pkt() - free the skb and rx_pkt_wrapper
+ *
+ * @chan_user_data: ipa_sys_context used for skb size and skb_free func
+ * @xfer_user_data: rx_pkt wrapper to be freed
+ */
+static void free_rx_pkt(void *chan_user_data, void *xfer_user_data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+		xfer_user_data;
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)
+		chan_user_data;
+
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+	sys->free_skb(rx_pkt->data.skb);
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+/**
+ * free_rx_page() - free the page and rx_pkt_wrapper
+ *
+ * @chan_user_data: channel user data (unused here; sys is taken from rx_pkt)
+ * @xfer_user_data: rx_pkt wrapper to be freed
+ */
+static void free_rx_page(void *chan_user_data, void *xfer_user_data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+		xfer_user_data;
+	struct ipa3_sys_context *sys = rx_pkt->sys;
+	int i;
+
+	for (i = 0; i < sys->page_recycle_repl->capacity; i++)
+		if (sys->page_recycle_repl->cache[i] == rx_pkt)
+			break;
+	if (i < sys->page_recycle_repl->capacity) {
+		page_ref_dec(rx_pkt->page_data.page);
+		sys->page_recycle_repl->cache[i] = NULL;
+	}
+	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
+		rx_pkt->len, DMA_FROM_DEVICE);
+	__free_pages(rx_pkt->page_data.page,
+		IPA_WAN_PAGE_ORDER);
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+/**
+ * ipa3_cleanup_rx() - release RX queue resources
+ */
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *r;
+	u32 head;
+	u32 tail;
+	int i;
+
+	/*
+	 * buffers not consumed by gsi are cleaned up using cleanup callback
+	 * provided to gsi
+	 */
+
+	spin_lock_bh(&sys->spinlock);
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->rcycl_list, link) {
+		list_del(&rx_pkt->link);
+		if (rx_pkt->data.dma_addr)
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				sys->rx_buff_sz, DMA_FROM_DEVICE);
+		else
+			IPADBG("DMA address already freed\n");
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+	spin_unlock_bh(&sys->spinlock);
+
+	if (sys->repl) {
+		head = atomic_read(&sys->repl->head_idx);
+		tail = atomic_read(&sys->repl->tail_idx);
+		while (head != tail) {
+			rx_pkt = sys->repl->cache[head];
+			if (!ipa3_ctx->ipa_wan_skb_page) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					rx_pkt->data.dma_addr,
+					sys->rx_buff_sz,
+					DMA_FROM_DEVICE);
+				sys->free_skb(rx_pkt->data.skb);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					rx_pkt->page_data.dma_addr,
+					rx_pkt->len,
+					DMA_FROM_DEVICE);
+				__free_pages(rx_pkt->page_data.page,
+					IPA_WAN_PAGE_ORDER);
+			}
+			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+				rx_pkt);
+			head = (head + 1) % sys->repl->capacity;
+		}
+
+		kfree(sys->repl->cache);
+		kfree(sys->repl);
+	}
+	if (sys->page_recycle_repl) {
+		for (i = 0; i < sys->page_recycle_repl->capacity; i++) {
+			rx_pkt = sys->page_recycle_repl->cache[i];
+			if (rx_pkt) {
+				dma_unmap_page(ipa3_ctx->pdev,
+					rx_pkt->page_data.dma_addr,
+					rx_pkt->len,
+					DMA_FROM_DEVICE);
+				__free_pages(rx_pkt->page_data.page,
+					IPA_WAN_PAGE_ORDER);
+				kmem_cache_free(
+					ipa3_ctx->rx_pkt_wrapper_cache,
+					rx_pkt);
+			}
+		}
+		kfree(sys->page_recycle_repl->cache);
+		kfree(sys->page_recycle_repl);
+	}
+}
+
+static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+	struct sk_buff *skb2 = NULL;
+
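+	/* with LAN RX NAPI enabled this runs in softirq context, so the
+	 * copy must use an atomic allocation
+	 */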
+	if (!ipa3_ctx->lan_rx_napi_enable)
+		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
+					GFP_KERNEL);
+	else
+		skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM,
+					GFP_ATOMIC);
+
+	if (likely(skb2)) {
+		/* Set the data pointer */
+		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+		memcpy(skb2->data, skb->data, len);
+		skb2->len = len;
+		skb_set_tail_pointer(skb2, len);
+	}
+
+	return skb2;
+}
+
+static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	struct ipahal_pkt_status status;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	int pad_len_byte;
+	int len;
+	unsigned char *buf;
+	int src_pipe;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+	struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
+	unsigned long ptr;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+	if (skb->len == 0) {
+		IPAERR("ZLT packet arrived to AP\n");
+		goto out;
+	}
+
+	if (sys->len_partial) {
+		IPADBG_LOW("len_partial %d\n", sys->len_partial);
+		buf = skb_push(skb, sys->len_partial);
+		memcpy(buf, sys->prev_skb->data, sys->len_partial);
+		sys->len_partial = 0;
+		sys->free_skb(sys->prev_skb);
+		sys->prev_skb = NULL;
+		goto begin;
+	}
+
+	/* this pipe has TX comp (status only) + mux-ed LAN RX data
+	 * (status+data)
+	 */
+	if (sys->len_rem) {
+		IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+				sys->len_pad);
+		if (sys->len_rem <= skb->len) {
+			if (sys->prev_skb) {
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					skb2 = skb_copy_expand(sys->prev_skb,
+						0, sys->len_rem, GFP_KERNEL);
+				else
+					skb2 = skb_copy_expand(sys->prev_skb,
+						0, sys->len_rem, GFP_ATOMIC);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, sys->len_rem),
+						skb->data, sys->len_rem);
+					skb_trim(skb2,
+						skb2->len - sys->len_pad);
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff);
+					if (sys->drop_packet)
+						dev_kfree_skb_any(skb2);
+					else
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+			}
+			skb_pull(skb, sys->len_rem);
+			sys->prev_skb = NULL;
+			sys->len_rem = 0;
+			sys->len_pad = 0;
+		} else {
+			if (sys->prev_skb) {
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					skb2 = skb_copy_expand(sys->prev_skb, 0,
+						skb->len, GFP_KERNEL);
+				else
+					skb2 = skb_copy_expand(sys->prev_skb, 0,
+						skb->len, GFP_ATOMIC);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, skb->len),
+						skb->data, skb->len);
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+				sys->prev_skb = skb2;
+			}
+			sys->len_rem -= skb->len;
+			goto out;
+		}
+	}
+
+begin:
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		sys->drop_packet = false;
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+
+		if (skb->len < pkt_status_sz) {
+			WARN_ON(sys->prev_skb != NULL);
+			IPADBG_LOW("status straddles buffer\n");
+			if (!ipa3_ctx->lan_rx_napi_enable)
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			else
+				sys->prev_skb = skb_copy(skb, GFP_ATOMIC);
+			sys->len_partial = skb->len;
+			goto out;
+		}
+
+		ipahal_pkt_status_parse(skb->data, &status);
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		switch (status.status_opcode) {
+		case IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET:
+		case IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS:
+			break;
+		case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
+			IPAERR_RL("Frag packets received on lan consumer\n");
+			IPAERR_RL("STATUS opcode=%d src=%d dst=%d src ip=%x\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.src_ip_addr);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		default:
+			IPAERR_RL("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+
+		IPA_STATS_EXCP_CNT(status.exception,
+				ipa3_ctx->stats.rx_excp_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
+			IPAERR_RL("status fields invalid\n");
+			IPAERR_RL("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+			WARN_ON(1);
+			/* HW gave an unexpected status */
+			ipa_assert();
+		}
+		if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
+			struct ipa3_tag_completion *comp;
+
+			IPADBG_LOW("TAG packet arrived\n");
+			if (status.tag_info == IPA_COOKIE) {
+				skb_pull(skb, pkt_status_sz);
+				if (skb->len < sizeof(comp)) {
+					IPAERR("TAG arrived without packet\n");
+					goto out;
+				}
+				memcpy(&comp, skb->data, sizeof(comp));
+				skb_pull(skb, sizeof(comp));
+				complete(&comp->comp);
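+				/* cnt is shared with the waiting thread;
+				 * whoever drops the last reference frees
+				 * the completion object
+				 */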
+				if (atomic_dec_return(&comp->cnt) == 0)
+					kfree(comp);
+				continue;
+			} else {
+				ptr = tag_to_pointer_wa(status.tag_info);
+				tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
+				IPADBG_LOW("tx_pkt recv = %pK\n", tx_pkt);
+			}
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+			continue;
+		}
+
+		if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
+			/* RX data */
+			src_pipe = status.endp_src_idx;
+
+			/*
+			 * A packet which is received back to the AP after
+			 * there was no route match.
+			 */
+			if (status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
+				ipahal_is_rule_miss_id(status.rt_rule_id))
+				sys->drop_packet = true;
+
+			if (skb->len == pkt_status_sz &&
+				status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+				WARN_ON(sys->prev_skb != NULL);
+				IPADBG_LOW("Ins header in next buffer\n");
+				if (!ipa3_ctx->lan_rx_napi_enable)
+					sys->prev_skb = skb_copy(skb,
+						GFP_KERNEL);
+				else
+					sys->prev_skb = skb_copy(skb,
+						GFP_ATOMIC);
+				sys->len_partial = skb->len;
+				goto out;
+			}
+
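+			/*
+			 * IPA pads the payload to a 4-byte boundary, e.g.
+			 * a 61-byte packet carries 3 pad bytes (len = 64)
+			 */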
+			pad_len_byte = ((status.pkt_len + 3) & ~3) -
+					status.pkt_len;
+			len = status.pkt_len + pad_len_byte;
+			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
+					status.pkt_len, len);
+
+			if (status.exception ==
+					IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
+				IPADBG_LOW(
+					"Dropping packet on DeAggr Exception\n");
+				sys->drop_packet = true;
+			}
+
+			skb2 = ipa3_skb_copy_for_client(skb,
+				min(status.pkt_len + pkt_status_sz, skb->len));
+			if (likely(skb2)) {
+				if (skb->len < len + pkt_status_sz) {
+					IPADBG_LOW("SPL skb len %d len %d\n",
+							skb->len, len);
+					sys->prev_skb = skb2;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_trim(skb2, status.pkt_len +
+							pkt_status_sz);
+					IPADBG_LOW("rx avail for %d\n",
+							status.endp_dest_idx);
+					if (sys->drop_packet) {
+						dev_kfree_skb_any(skb2);
+					} else if (status.pkt_len >
+						   IPA_GENERIC_AGGR_BYTE_LIMIT *
+						   1024) {
+						IPAERR("packet size invalid\n");
+						IPAERR("STATUS opcode=%d\n",
+							status.status_opcode);
+						IPAERR("src=%d dst=%d len=%d\n",
+							status.endp_src_idx,
+							status.endp_dest_idx,
+							status.pkt_len);
+						/* Unexpected HW status */
+						ipa_assert();
+					} else {
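+						/*
+						 * charge this clone a
+						 * proportional share of
+						 * the unused buffer space
+						 */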
+						skb2->truesize = skb2->len +
+						sizeof(struct sk_buff) +
+						(ALIGN(len +
+						pkt_status_sz, 32) *
+						unused / used_align);
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+					}
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			} else {
+				IPAERR("fail to alloc skb\n");
+				if (skb->len < len) {
+					sys->prev_skb = NULL;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			}
+			/* TX comp */
+			ipa3_wq_write_done_status(src_pipe, tx_pkt);
+			IPADBG_LOW("tx comp imp for %d\n", src_pipe);
+		} else {
+			/* TX comp */
+			ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
+			IPADBG_LOW("tx comp exp for %d\n",
+				status.endp_src_idx);
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+		}
+		tx_pkt = NULL;
+	}
+
+out:
+	ipa3_skb_recycle(skb);
+	return 0;
+}
+
+static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
+		struct sk_buff *skb, unsigned int len)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_copy_expand(prev_skb, 0,
+			len, GFP_KERNEL);
+	if (likely(skb2)) {
+		memcpy(skb_put(skb2, len),
+			skb->data, len);
+	} else {
+		IPAERR("copy expand failed\n");
+		skb2 = NULL;
+	}
+	dev_kfree_skb_any(prev_skb);
+
+	return skb2;
+}
+
+static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	struct sk_buff *skb2;
+
+	IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
+	if (sys->len_rem <= skb->len) {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					sys->len_rem);
+			if (likely(skb2)) {
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, ipahal_pkt_status_get_size());
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE,
+					(unsigned long)(skb2));
+			}
+		}
+		skb_pull(skb, sys->len_rem);
+		sys->prev_skb = NULL;
+		sys->len_rem = 0;
+	} else {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					skb->len);
+			sys->prev_skb = skb2;
+		}
+		sys->len_rem -= skb->len;
+		skb_pull(skb, skb->len);
+	}
+}
+
+static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	struct ipahal_pkt_status status;
+	unsigned char *skb_data;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	u16 pkt_len_with_pad;
+	u32 qmap_hdr;
+	int checksum_trailer_exists;
+	int frame_len;
+	int ep_idx;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		goto bail;
+	}
+
+	if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+		sys->ep->client_notify(sys->ep->priv,
+			IPA_RECEIVE, (unsigned long)(skb));
+		return 0;
+	}
+	if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should enable only with GRO Aggr\n");
+		ipa_assert();
+	}
+
+	/*
+	 * payload split across 2 or more buffers:
+	 * take the start of the payload from prev_skb
+	 */
+	if (sys->len_rem)
+		ipa3_wan_rx_handle_splt_pyld(skb, sys);
+
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+		if (skb->len < pkt_status_sz) {
+			IPAERR("status straddles buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		ipahal_pkt_status_parse(skb->data, &status);
+		skb_data = skb->data;
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if ((status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+			IPAERR("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
+			IPAERR("status fields invalid\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
+			continue;
+		}
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+		if (status.endp_dest_idx != ep_idx) {
+			IPAERR("expected endp_dest_idx %d received %d\n",
+					ep_idx, status.endp_dest_idx);
+			WARN_ON(1);
+			goto bail;
+		}
+		/* RX data */
+		if (skb->len == pkt_status_sz) {
+			IPAERR("Ins header in next buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
+		/*
+		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
+		 * header
+		 */
+
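+		/*
+		 * QMAP header layout on the wire (big endian):
+		 *   byte 0: C/D flag and pad length
+		 *   byte 1: mux id
+		 *   bytes 2-3: packet length including padding
+		 */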
+		/* QMAP is BE: convert the pkt_len field from BE to LE */
+		pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
+		IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
+		/* get the CHECKSUM_PROCESS bit */
+		checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
+		IPADBG_LOW("checksum_trailer_exists %d\n",
+				checksum_trailer_exists);
+
+		frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
+			    pkt_len_with_pad;
+		if (checksum_trailer_exists)
+			frame_len += IPA_DL_CHECKSUM_LENGTH;
+		IPADBG_LOW("frame_len %d\n", frame_len);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (likely(skb2)) {
+			/*
+			 * the actual data len is smaller than expected:
+			 * payload split across 2 buffers
+			 */
+			if (skb->len < frame_len) {
+				IPADBG_LOW("SPL skb len %d len %d\n",
+						skb->len, frame_len);
+				sys->prev_skb = skb2;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_trim(skb2, frame_len);
+				IPADBG_LOW("rx avail for %d\n",
+						status.endp_dest_idx);
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, pkt_status_sz);
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff) +
+					(ALIGN(frame_len, 32) *
+					 unused / used_align);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE, (unsigned long)(skb2));
+				skb_pull(skb, frame_len);
+			}
+		} else {
+			IPAERR("fail to clone\n");
+			if (skb->len < frame_len) {
+				sys->prev_skb = NULL;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_pull(skb, frame_len);
+			}
+		}
+	}
+bail:
+	sys->free_skb(skb);
+	return 0;
+}
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+	return __dev_alloc_skb(len, flags);
+}
+
+static void ipa3_free_skb_rx(struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *rx_skb = (struct sk_buff *)data;
+	struct ipahal_pkt_status_thin status;
+	struct ipa3_ep_context *ep;
+	unsigned int src_pipe;
+	u32 metadata;
+	u8 ucp;
+
+	ipahal_pkt_status_parse_thin(rx_skb->data, &status);
+	src_pipe = status.endp_src_idx;
+	metadata = status.metadata;
+	ucp = status.ucp;
+	ep = &ipa3_ctx->ep[src_pipe];
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
+		IPAERR_RL("drop pipe=%d\n", src_pipe);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
+				IPA_LAN_RX_HEADER_LENGTH);
+	else
+		skb_pull(rx_skb, ipahal_pkt_status_get_size());
+
+	/* Metadata Info
+	 *  ------------------------------------------
+	 *  |   3     |   2     |    1        |  0   |
+	 *  | fw_desc | vdev_id | qmap mux id | Resv |
+	 *  ------------------------------------------
+	 */
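+	/* stash the upper metadata half (vdev_id/fw_desc) and the UCP
+	 * flag in the skb control buffer for the client to consume
+	 */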
+	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	*(u8 *)(rx_skb->cb + 4) = ucp;
+	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+			metadata, *(u32 *)rx_skb->cb);
+	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
+
+	if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
+				ep->valid && ep->client_notify))
+		ep->client_notify(ep->priv, IPA_RECEIVE,
+				(unsigned long)(rx_skb));
+	else
+		dev_kfree_skb_any(rx_skb);
+}
+
+static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	rx_pkt->data.dma_addr = 0;
+	/* skb recycle was moved to pyld_hdlr */
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
+static void ipa3_recycle_rx_page_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	struct ipa_rx_page_data rx_page;
+
+	rx_page = rx_pkt->page_data;
+
+	/* Free rx_wrapper only for tmp alloc pages */
+	if (rx_page.is_tmp_alloc)
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+/**
+ * handle_skb_completion()- Handle event completion EOB or EOT and prep the skb
+ *
+ * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
+ *
+ * if eot: Set skb values, put skb at the end of the list. Then update the
+ * length and chain the skbs together while also freeing and unmapping the
+ * corresponding rx pkt. Once finished return the head_skb to be sent up the
+ * network stack.
+ */
+static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
+		*notify, bool update_truesize)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
+	struct sk_buff *rx_skb, *next_skb = NULL;
+	struct list_head *head;
+	struct ipa3_sys_context *sys;
+
+	sys = (struct ipa3_sys_context *) notify->chan_user_data;
+	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	rx_pkt->sys->len--;
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+
+	if (notify->bytes_xfered)
+		rx_pkt->len = notify->bytes_xfered;
+
+	/* Drop packets when the WAN consumer channel receives an EOB event */
+	if ((notify->evt_id == GSI_CHAN_EVT_EOB ||
+		sys->skip_eot) &&
+		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		sys->free_rx_wrapper(rx_pkt);
+		sys->eob_drop_cnt++;
+		if (notify->evt_id == GSI_CHAN_EVT_EOB) {
+			IPADBG("EOB event on WAN consumer channel, drop\n");
+			sys->skip_eot = true;
+		} else {
+			IPADBG("Reset skip eot flag.\n");
+			sys->skip_eot = false;
+		}
+		return NULL;
+	}
+
+	rx_skb = rx_pkt->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt->len);
+	rx_skb->len = rx_pkt->len;
+
+	if (update_truesize) {
+		*(unsigned int *)rx_skb->cb = rx_skb->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+	}
+
+	if (notify->veid >= GSI_VEID_MAX) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	head = &rx_pkt->sys->pending_pkts[notify->veid];
+
+	INIT_LIST_HEAD(&rx_pkt->link);
+	list_add_tail(&rx_pkt->link, head);
+
+	/* Check added for handling LAN consumer packet without EOT flag */
+	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
+		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
+		/*
+		 * go over the list backward to save computations on
+		 * updating length
+		 */
+		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
+			rx_skb = rx_pkt->data.skb;
+
+			list_del(&rx_pkt->link);
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				sys->rx_buff_sz, DMA_FROM_DEVICE);
+			sys->free_rx_wrapper(rx_pkt);
+
+			if (next_skb) {
+				skb_shinfo(rx_skb)->frag_list = next_skb;
+				rx_skb->len += next_skb->len;
+				rx_skb->data_len += next_skb->len;
+			}
+			next_skb = rx_skb;
+		}
+	} else {
+		return NULL;
+	}
+	return rx_skb;
+}
+
+/**
+ * handle_page_completion()- Handle event completion EOB or EOT
+ * and prep the skb
+ *
+ * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
+ *
+ * if eot: Set skb values, put skb at the end of the list. Then walk the
+ * list, attaching each page as a frag of the head skb while also
+ * freeing and unmapping the corresponding rx pkt. Once finished
+ * return the head_skb to be sent up the network stack.
+ */
+static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
+		*notify, bool update_truesize)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
+	struct sk_buff *rx_skb;
+	struct list_head *head;
+	struct ipa3_sys_context *sys;
+	struct ipa_rx_page_data rx_page;
+
+	sys = (struct ipa3_sys_context *) notify->chan_user_data;
+	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+	rx_page = rx_pkt->page_data;
+
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	rx_pkt->sys->len--;
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+
+	/* TODO: truesize handle for EOB */
+	if (update_truesize)
+		IPAERR("update_truesize not supported\n");
+
+	if (notify->veid >= GSI_VEID_MAX) {
+		rx_pkt->sys->free_rx_wrapper(rx_pkt);
+		if (!rx_page.is_tmp_alloc)
+			init_page_count(rx_page.page);
+		IPAERR("notify->veid > GSI_VEID_MAX\n");
+		return NULL;
+	}
+
+	head = &rx_pkt->sys->pending_pkts[notify->veid];
+
+	INIT_LIST_HEAD(&rx_pkt->link);
+	list_add_tail(&rx_pkt->link, head);
+
+	/* Check added for handling LAN consumer packet without EOT flag */
+	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
+		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
+		rx_skb = alloc_skb(0, GFP_ATOMIC);
+		if (unlikely(!rx_skb)) {
+			rx_pkt->sys->free_rx_wrapper(rx_pkt);
+			if (!rx_page.is_tmp_alloc)
+				init_page_count(rx_page.page);
+			IPAERR("skb alloc failure\n");
+			return NULL;
+		}
+		/*
+		 * go over the list backward to save computations on
+		 * updating length
+		 */
+		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
+			rx_page = rx_pkt->page_data;
+
+			list_del(&rx_pkt->link);
+			if (rx_page.is_tmp_alloc)
+				dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
+					rx_pkt->len, DMA_FROM_DEVICE);
+			else
+				dma_sync_single_for_cpu(ipa3_ctx->pdev,
+					rx_page.dma_addr,
+					rx_pkt->len, DMA_FROM_DEVICE);
+			rx_pkt->sys->free_rx_wrapper(rx_pkt);
+
+			skb_add_rx_frag(rx_skb,
+				skb_shinfo(rx_skb)->nr_frags,
+				rx_page.page, 0,
+				notify->bytes_xfered,
+				PAGE_SIZE << IPA_WAN_PAGE_ORDER);
+		}
+	} else {
+		return NULL;
+	}
+	return rx_skb;
+}
+
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify)
+{
+	struct sk_buff *rx_skb;
+	struct ipa3_sys_context *coal_sys;
+	int ipa_ep_idx;
+
+	if (!notify) {
+		IPAERR_RL("gsi_chan_xfer_notify is null\n");
+		return;
+	}
+	rx_skb = handle_skb_completion(notify, true);
+
+	if (rx_skb) {
+		sys->pyld_hdlr(rx_skb, sys);
+
+		/* For coalescing, we have 2 transfer rings to replenish */
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			ipa_ep_idx = ipa3_get_ep_mapping(
+					IPA_CLIENT_APPS_WAN_CONS);
+
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPAERR("Invalid client.\n");
+				return;
+			}
+
+			coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
+			coal_sys->repl_hdlr(coal_sys);
+		}
+
+		sys->repl_hdlr(sys);
+	}
+}
+
+static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
+		struct gsi_chan_xfer_notify *notify, uint32_t num)
+{
+	struct ipa3_sys_context *wan_def_sys;
+	int i, ipa_ep_idx;
+	struct sk_buff *rx_skb, *first_skb = NULL, *prev_skb = NULL;
+
+	/* non-coalescing case (SKB chaining enabled) */
+	if (sys->ep->client != IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		for (i = 0; i < num; i++) {
+			if (!ipa3_ctx->ipa_wan_skb_page)
+				rx_skb = handle_skb_completion(
+					&notify[i], false);
+			else
+				rx_skb = handle_page_completion(
+					&notify[i], false);
+
+			/* this is always true for EOTs */
+			if (rx_skb) {
+				if (!first_skb)
+					first_skb = rx_skb;
+
+				if (prev_skb)
+					skb_shinfo(prev_skb)->frag_list =
+						rx_skb;
+
+				prev_skb = rx_skb;
+			}
+		}
+		if (prev_skb) {
+			skb_shinfo(prev_skb)->frag_list = NULL;
+			sys->pyld_hdlr(first_skb, sys);
+		}
+	} else {
+		if (!ipa3_ctx->ipa_wan_skb_page) {
+			/* TODO: add chaining for coal case */
+			for (i = 0; i < num; i++) {
+				rx_skb = handle_skb_completion(
+					&notify[i], false);
+				if (rx_skb) {
+					sys->pyld_hdlr(rx_skb, sys);
+					/*
+					 * For coalescing, we have 2 transfer
+					 * rings to replenish
+					 */
+					ipa_ep_idx = ipa3_get_ep_mapping(
+						IPA_CLIENT_APPS_WAN_CONS);
+					if (ipa_ep_idx ==
+						IPA_EP_NOT_ALLOCATED) {
+						IPAERR("Invalid client.\n");
+						return;
+					}
+					wan_def_sys =
+						ipa3_ctx->ep[ipa_ep_idx].sys;
+					wan_def_sys->repl_hdlr(wan_def_sys);
+					sys->repl_hdlr(sys);
+				}
+			}
+		} else {
+			for (i = 0; i < num; i++) {
+				rx_skb = handle_page_completion(
+					&notify[i], false);
+
+				/* this is always true for EOTs */
+				if (rx_skb) {
+					if (!first_skb)
+						first_skb = rx_skb;
+
+					if (prev_skb)
+						skb_shinfo(prev_skb)->frag_list
+							= rx_skb;
+
+					prev_skb = rx_skb;
+				}
+			}
+			if (prev_skb) {
+				skb_shinfo(prev_skb)->frag_list = NULL;
+				sys->pyld_hdlr(first_skb, sys);
+				/*
+				 * For coalescing, we have 2 transfer
+				 * rings to replenish
+				 */
+				ipa_ep_idx = ipa3_get_ep_mapping(
+						IPA_CLIENT_APPS_WAN_CONS);
+				if (ipa_ep_idx ==
+					IPA_EP_NOT_ALLOCATED) {
+					IPAERR("Invalid client.\n");
+					return;
+				}
+				wan_def_sys =
+					ipa3_ctx->ep[ipa_ep_idx].sys;
+				wan_def_sys->repl_hdlr(wan_def_sys);
+			}
+		}
+	}
+}
+
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	rx_pkt_expected = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+
+	sys->len--;
+
+	if (notify->bytes_xfered)
+		rx_pkt_expected->len = notify->bytes_xfered;
+
+	rx_skb = rx_pkt_expected->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->ep->wstats.tx_pkts_rcvd++;
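+	/* below the low watermark, recycle the buffer back to the WLAN
+	 * pool instead of handing it to the client so RX does not starve
+	 */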
+	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+		ipa3_free_skb(&rx_pkt_expected->data);
+		sys->ep->wstats.tx_pkts_dropped++;
+	} else {
+		sys->ep->wstats.tx_pkts_sent++;
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(&rx_pkt_expected->data));
+	}
+	ipa3_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys)
+{
+	IPADBG_LOW("ENTER.\n");
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		IPAERR("descriptor list is empty!\n");
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, 0);
+	IPADBG_LOW("EXIT\n");
+}
+
+static void ipa3_wq_rx_avail(struct work_struct *work)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_sys_context *sys;
+
+	rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
+	WARN(unlikely(rx_pkt == NULL), "rx pkt is null");
+	sys = rx_pkt->sys;
+	ipa3_wq_rx_common(sys, NULL);
+}
+
+static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+	struct ipa3_sys_context *sys)
+{
+	if (sys->ep->client_notify) {
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+	} else {
+		dev_kfree_skb_any(rx_skb);
+		WARN(1, "client notify is null");
+	}
+
+	return 0;
+}
+
+static int ipa3_odl_dpl_rx_pyld_hdlr(struct sk_buff *rx_skb,
+	struct ipa3_sys_context *sys)
+{
+	if (WARN(!sys->ep->client_notify, "sys->ep->client_notify is NULL\n")) {
+		dev_kfree_skb_any(rx_skb);
+	} else {
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+		/* Recycle the SKB before reusing it */
+		ipa3_skb_recycle(rx_skb);
+	}
+
+	return 0;
+}
+
+static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys)
+{
+	u32 *aggr_byte_limit = &in->ipa_ep_cfg.aggr.aggr_byte_limit;
+	u32 adjusted_sz = ipa_adjust_ra_buff_base_sz(*aggr_byte_limit);
+
+	IPADBG("get close-by %u\n", adjusted_sz);
+	IPADBG("set rx_buff_sz %lu\n", (unsigned long)
+		IPA_GENERIC_RX_BUFF_SZ(adjusted_sz));
+
+	/* disable ipa_status */
+	sys->ep->status.status_en = false;
+	sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(adjusted_sz);
+
+	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
+
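+	/*
+	 * clamp the aggregation byte limit so a closed aggregate always
+	 * fits within the allocated RX buffer
+	 */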
+	*aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ?
+		IPA_ADJUST_AGGR_BYTE_LIMIT(sys->rx_buff_sz) :
+		IPA_ADJUST_AGGR_BYTE_LIMIT(*aggr_byte_limit);
+
+	IPADBG("set aggr_limit %lu\n", (unsigned long) *aggr_byte_limit);
+}
+
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys)
+{
+	bool apps_wan_cons_agg_gro_flag;
+	unsigned long aggr_byte_limit;
+
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->use_comm_evt_ring = false;
+		return 0;
+	}
+
+	if (in->client == IPA_CLIENT_APPS_WAN_PROD) {
+		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->use_comm_evt_ring = true;
+		INIT_WORK(&sys->work, ipa3_send_nop_desc);
+		atomic_set(&sys->workqueue_flushed, 0);
+
+		/*
+		 * enable source notification status for exception packets
+		 * (i.e. QMAP commands) to be routed to modem.
+		 */
+		sys->ep->status.status_en = true;
+		sys->ep->status.status_ep =
+			ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
+		return 0;
+	}
+
+	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
+		sys->policy = IPA_POLICY_NOINTR_MODE;
+		return 0;
+	}
+
+	apps_wan_cons_agg_gro_flag =
+		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro;
+	aggr_byte_limit = in->ipa_ep_cfg.aggr.aggr_byte_limit;
+
+	if (IPA_CLIENT_IS_PROD(in->client)) {
+		if (sys->ep->skip_ep_cfg) {
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->use_comm_evt_ring = true;
+			atomic_set(&sys->curr_polling_state, 0);
+		} else {
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->use_comm_evt_ring = true;
+			INIT_WORK(&sys->work, ipa3_send_nop_desc);
+			atomic_set(&sys->workqueue_flushed, 0);
+		}
+	} else {
+		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+		    in->client == IPA_CLIENT_APPS_WAN_CONS ||
+		    in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			sys->ep->status.status_en = true;
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+					ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+				IPA_GENERIC_RX_BUFF_BASE_SZ);
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+			if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+				in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
+			else
+				in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+			in->ipa_ep_cfg.aggr.aggr_time_limit =
+				IPA_GENERIC_AGGR_TIME_LIMIT;
+			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+				INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
+				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache_recycle;
+				sys->free_rx_wrapper =
+					ipa3_recycle_rx_wrapper;
+				sys->rx_pool_sz =
+					ipa3_ctx->lan_rx_ring_size;
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+				IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+				IPA_GENERIC_AGGR_PKT_LIMIT;
+			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
+				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+				if (ipa3_ctx->ipa_wan_skb_page
+					&& in->napi_obj) {
+					INIT_WORK(&sys->repl_work,
+							ipa3_wq_page_repl);
+					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+					sys->free_rx_wrapper =
+						ipa3_recycle_rx_page_wrapper;
+					sys->repl_hdlr =
+						ipa3_replenish_rx_page_recycle;
+					sys->rx_pool_sz =
+						ipa3_ctx->wan_rx_ring_size;
+				} else {
+					INIT_WORK(&sys->repl_work,
+						ipa3_wq_repl_rx);
+					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+					sys->free_rx_wrapper =
+						ipa3_free_rx_wrapper;
+					sys->rx_pool_sz =
+						ipa3_ctx->wan_rx_ring_size;
+					if (nr_cpu_ids > 1) {
+						sys->repl_hdlr =
+						ipa3_fast_replenish_rx_cache;
+					} else {
+						sys->repl_hdlr =
+						ipa3_replenish_rx_cache;
+					}
+					if (in->napi_obj && in->recycle_enabled)
+						sys->repl_hdlr =
+						ipa3_replenish_rx_cache_recycle;
+				}
+				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+						= true;
+				if (apps_wan_cons_agg_gro_flag)
+					ipa3_set_aggr_limit(in, sys);
+				else {
+					in->ipa_ep_cfg.aggr.aggr_byte_limit
+						= IPA_GENERIC_AGGR_BYTE_LIMIT;
+					in->ipa_ep_cfg.aggr.aggr_pkt_limit
+						= IPA_GENERIC_AGGR_PKT_LIMIT;
+				}
+			}
+		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+			sys->rx_pool_sz = in->desc_fifo_sz /
+				IPA_FIFO_ELEMENT_SIZE - 1;
+			if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+			sys->pyld_hdlr = NULL;
+			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_pool_sz = in->desc_fifo_sz /
+				IPA_FIFO_ELEMENT_SIZE - 1;
+			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			/* recycle skb for GSB use case */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache;
+				/* Overwrite buffer size & aggr limit for GSB */
+				sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+					IPA_GSB_RX_BUFF_BASE_SZ);
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+					IPA_GSB_AGGR_BYTE_LIMIT;
+			} else {
+				sys->free_rx_wrapper =
+					ipa3_free_rx_wrapper;
+				sys->repl_hdlr = ipa3_replenish_rx_cache;
+				sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+			}
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+		} else if (in->client == IPA_CLIENT_ODL_DPL_CONS) {
+			IPADBG("assigning policy to ODL client:%d\n",
+				in->client);
+			/* Status enabling is needed for DPLv2 with
+			 * IPA versions < 4.5.
+			 * Don't enable ipa_status for APQ, since MDM IPA
+			 * has IPA >= 4.5 with DPLv3.
+			 */
+			if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+				ipa3_is_mhip_offload_enabled())
+				sys->ep->status.status_en = false;
+			else
+				sys->ep->status.status_en = true;
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz =
+				IPA_GENERIC_RX_BUFF_SZ(IPA_ODL_RX_BUFF_SZ);
+			sys->pyld_hdlr = ipa3_odl_dpl_rx_pyld_hdlr;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_recycle_rx_wrapper;
+			sys->repl_hdlr = ipa3_replenish_rx_cache_recycle;
+			sys->rx_pool_sz = in->desc_fifo_sz /
+					IPA_FIFO_ELEMENT_SIZE - 1;
+		} else {
+			WARN(1, "Need to install a RX pipe hdlr\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_tx_client_rx_notify_release() - Callback function
+ * which will call the user supplied callback function to
+ * release the skb, or release it on its own if no callback
+ * function was supplied
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa3_tx_dp_mul
+ */
+static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
+{
+	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("Received data desc anchor:%pK\n", dd);
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+	/* wlan host driver waits till tx complete before unload */
+	IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
+		ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
+	IPADBG_LOW("calling client notify callback with priv:%pK\n",
+		ipa3_ctx->ep[ep_idx].priv);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify) {
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)user1);
+		ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+	}
+}
+
+/**
+ * ipa3_tx_client_rx_pkt_status() - Callback function
+ * which increases the available fifo descriptor count for
+ * non-final data descriptors
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa3_tx_dp_mul
+ */
+static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
+{
+	int ep_idx = user2;
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+/**
+ * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD one at a
+ * time and sets the EOT flag on the last descriptor. Once the send is
+ * done from the transport point of view, the IPA driver gets notified
+ * by the supplied callback - ipa_gsi_irq_tx_notify_cb()
+ *
+ * ipa_gsi_irq_tx_notify_cb invokes the user supplied callback
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	/* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+	struct ipa_tx_data_desc *entry;
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc desc[2];
+	u32 num_desc, cnt;
+	int ep_idx;
+
+	IPADBG_LOW("Received data desc anchor:%pK\n", data_desc);
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+	ep_idx = ipa3_get_ep_mapping(src);
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_send;
+	}
+	IPADBG_LOW("ep idx:%d\n", ep_idx);
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_send;
+	}
+	sys->ep->wstats.rx_hd_rcvd++;
+
+	/* Calculate the number of descriptors */
+	num_desc = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		num_desc++;
+	}
+	IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
+
+	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+		IPAERR("Insufficient data descriptors available\n");
+		goto fail_send;
+	}
+
+	/* Assign callback only for last data descriptor */
+	cnt = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+		IPADBG_LOW("Parsing data desc :%d\n", cnt);
+		cnt++;
+		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+			(u8)sys->ep->cfg.meta.qmap_id;
+
+		/* the tag field will be populated in ipa3_send() function */
+		desc[0].is_tag_status = true;
+		desc[1].pyld = entry->pyld_buffer;
+		desc[1].len = entry->pyld_len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].user1 = data_desc;
+		desc[1].user2 = ep_idx;
+		IPADBG_LOW("priv:%pK pyld_buf:0x%pK pyld_len:%d\n",
+			entry->priv, desc[1].pyld, desc[1].len);
+
+		/* In case of last descriptor populate callback */
+		if (cnt == num_desc) {
+			IPADBG_LOW("data desc:%pK\n", data_desc);
+			desc[1].callback = ipa3_tx_client_rx_notify_release;
+		} else {
+			desc[1].callback = ipa3_tx_client_rx_pkt_status;
+		}
+
+		IPADBG_LOW("calling ipa3_send()\n");
+		if (ipa3_send(sys, 2, desc, true)) {
+			IPAERR("fail to send skb\n");
+			sys->ep->wstats.rx_pkt_leak += (cnt-1);
+			sys->ep->wstats.rx_dp_fail++;
+			goto fail_send;
+		}
+
+		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+			atomic_dec(&sys->ep->avail_fifo_desc);
+
+		sys->ep->wstats.rx_pkts_rcvd++;
+		IPADBG_LOW("ep=%d fifo desc=%d\n",
+			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+	}
+
+	sys->ep->wstats.rx_hd_processed++;
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return 0;
+
+fail_send:
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return -EFAULT;
+}
+
+void ipa3_free_skb(struct ipa_rx_data *data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	ipa3_ctx->wc_memb.total_tx_pkts_freed++;
+	rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
+
+	ipa3_skb_recycle(rx_pkt->data.skb);
+	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+	list_add_tail(&rx_pkt->link,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+	ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+			unsigned long *ipa_transport_hdl,
+			u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+	if (sys_in->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client :%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
+			IPAERR("EP %d already allocated\n", ipa_ep_idx);
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_hdr_ext(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr_ext)) {
+				IPAERR("fail config hdr_ext prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPAERR("client %d (ep: %d) overlay ok sys=%pK\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = true;
+	if (en_status) {
+		ep->status.status_en = true;
+		ep->status.status_ep = ipa_ep_idx;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n",
+				 result, ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	*ipa_pipe_num = ipa_ep_idx;
+	*ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
+
+int ipa3_sys_teardown(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+	ep->valid = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	ep->gsi_chan_hdl = gsi_ch_hdl;
+	ep->gsi_evt_ring_hdl = gsi_ev_hdl;
+
+	return 0;
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		tx_pkt = notify->xfer_user_data;
+		tx_pkt->xmit_done = true;
+		atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
+		tasklet_schedule(&tx_pkt->sys->tasklet);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
+{
+	bool clk_off;
+
+	atomic_set(&sys->curr_polling_state, 1);
+	__ipa3_update_curr_poll_state(sys->ep->client, 1);
+
+	ipa3_inc_acquire_wakelock();
+
+	/*
+	 * pm deactivate is done in wq context
+	 * or after NAPI poll
+	 */
+
+	clk_off = ipa_pm_activate(sys->pm_hdl);
+	if (!clk_off && sys->napi_obj) {
+		napi_schedule(sys->napi_obj);
+		return;
+	}
+	queue_work(sys->wq, &sys->work);
+}
+
+static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+
+	sys->ep->xfer_notify_valid = true;
+	sys->ep->xfer_notify = *notify;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+	case GSI_CHAN_EVT_EOB:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			__ipa_gsi_irq_rx_scedule_poll(sys);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+	if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
+		return;
+	}
+
+	sys->ep->xfer_notify_valid = true;
+	sys->ep->xfer_notify = *notify;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+int ipa3_alloc_common_event_ring(void)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	dma_addr_t evt_dma_addr;
+	int result;
+
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+
+	gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
+
+	gsi_evt_ring_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev,
+		gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
+	if (!gsi_evt_ring_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_evt_ring_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+	gsi_evt_ring_props.int_modt = 0;
+	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
+	gsi_evt_ring_props.rp_update_addr = 0;
+	gsi_evt_ring_props.exclusive = false;
+	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
+	if (result) {
+		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
+		return result;
+	}
+	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
+
+	return 0;
+}
+
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep)
+{
+	u32 ring_size;
+	int result;
+	gfp_t mem_flag = GFP_KERNEL;
+	u32 coale_ep_idx;
+
+	if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
+		in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+		in->client == IPA_CLIENT_APPS_WAN_PROD)
+		mem_flag = GFP_ATOMIC;
+
+	if (!ep) {
+		IPAERR("EP context is empty\n");
+		return -EINVAL;
+	}
+	coale_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	/*
+	 * GSI ring length is calculated based on the desc_fifo_sz
+	 * which was meant to define the BAM desc fifo. GSI descriptors
+	 * are 16B as opposed to 8B for BAM.
+	 */
+	ring_size = 2 * in->desc_fifo_sz;
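+	/* ~0 marks the event ring handle as not yet allocated */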
+	ep->gsi_evt_ring_hdl = ~0;
+	if (ep->sys->use_comm_evt_ring) {
+		if (ipa3_ctx->gsi_evt_comm_ring_rem < ring_size) {
+			IPAERR("not enough space in common event ring\n");
+			IPAERR("available: %d needed: %d\n",
+				ipa3_ctx->gsi_evt_comm_ring_rem,
+				ring_size);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		ipa3_ctx->gsi_evt_comm_ring_rem -= (ring_size);
+		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+	} else if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		result = ipa_gsi_setup_event_ring(ep,
+				IPA_COMMON_EVENT_RING_SIZE, mem_flag);
+		if (result)
+			goto fail_setup_event_ring;
+
+	} else if (in->client == IPA_CLIENT_APPS_WAN_CONS &&
+			coale_ep_idx != IPA_EP_NOT_ALLOCATED &&
+			ipa3_ctx->ep[coale_ep_idx].valid == 1) {
+		IPADBG("Wan consumer pipe configured\n");
+		result = ipa_gsi_setup_coal_def_channel(in, ep,
+					&ipa3_ctx->ep[coale_ep_idx]);
+		if (result) {
+			IPAERR("Failed to setup default coal GSI channel\n");
+			goto fail_setup_event_ring;
+		}
+		return result;
+	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+			IPA_CLIENT_IS_CONS(ep->client)) {
+		result = ipa_gsi_setup_event_ring(ep, ring_size, mem_flag);
+		if (result)
+			goto fail_setup_event_ring;
+	}
+	result = ipa_gsi_setup_transfer_ring(ep, ring_size,
+		ep->sys, mem_flag);
+	if (result)
+		goto fail_setup_transfer_ring;
+
+	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+	return 0;
+
+fail_setup_transfer_ring:
+	if (ep->gsi_mem_info.evt_ring_base_vaddr)
+		dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
+			ep->gsi_mem_info.evt_ring_base_vaddr,
+			ep->gsi_mem_info.evt_ring_base_addr);
+fail_setup_event_ring:
+	IPAERR("Return with err: %d\n", result);
+	return result;
+}
+
+static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
+	u32 ring_size, gfp_t mem_flag)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	dma_addr_t evt_dma_addr;
+	int result;
+
+	evt_dma_addr = 0;
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	gsi_evt_ring_props.ring_len = ring_size;
+	gsi_evt_ring_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
+		&evt_dma_addr, mem_flag);
+	if (!gsi_evt_ring_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_evt_ring_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+
+	/* copy mem info */
+	ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		gsi_evt_ring_props.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr =
+		gsi_evt_ring_props.ring_base_vaddr;
+
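+	/* with NAPI, batch completions via the moderation counter;
+	 * without it, interrupt on every event (modc = 1)
+	 */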
+	if (ep->sys->napi_obj) {
+		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
+		gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC;
+	} else {
+		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
+		gsi_evt_ring_props.int_modc = 1;
+	}
+
+	IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
+		ep->client,
+		gsi_evt_ring_props.int_modt,
+		gsi_evt_ring_props.int_modc);
+	gsi_evt_ring_props.rp_update_addr = 0;
+	gsi_evt_ring_props.exclusive = true;
+	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_alloc_evt_ring;
+
+	return 0;
+
+fail_alloc_evt_ring:
+	if (ep->gsi_mem_info.evt_ring_base_vaddr)
+		dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
+			ep->gsi_mem_info.evt_ring_base_vaddr,
+			ep->gsi_mem_info.evt_ring_base_addr);
+	IPAERR("Return with err: %d\n", result);
+	return result;
+}
+
+static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
+	u32 ring_size, struct ipa3_sys_context *user_data, gfp_t mem_flag)
+{
+	dma_addr_t dma_addr;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct gsi_chan_props gsi_channel_props;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	int result;
+
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+		gsi_channel_props.prot = GSI_CHAN_PROT_GCI;
+	else
+		gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	} else {
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
+	}
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+		       ep->client);
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	} else {
+		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+	}
+
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.ring_len = ring_size;
+
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			&dma_addr, mem_flag);
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_channel_props.ring_len);
+		result = -ENOMEM;
+		goto fail_alloc_channel_ring;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+
+	/* copy mem info */
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		gsi_channel_props.ring_base_vaddr;
+
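+	/*
+	 * From IPA 4.0 onwards the channel doorbell is written in direct
+	 * mode; older hardware goes through the GSI doorbell engine.
+	 */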
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
+		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
+	else
+		gsi_channel_props.low_weight = 1;
+	gsi_channel_props.db_in_bytes = 1;
+	gsi_channel_props.prefetch_mode = gsi_ep_info->prefetch_mode;
+	gsi_channel_props.empty_lvl_threshold = gsi_ep_info->prefetch_threshold;
+	gsi_channel_props.chan_user_data = user_data;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+	else
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
+	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		gsi_channel_props.cleanup_cb = free_rx_pkt;
+
+	/* overwrite the cleanup_cb for page recycling */
+	if (ipa3_ctx->ipa_wan_skb_page &&
+		(IPA_CLIENT_IS_WAN_CONS(ep->client)))
+		gsi_channel_props.cleanup_cb = free_rx_page;
+
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to alloc GSI chan.\n");
+		goto fail_alloc_channel;
+	}
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	/*
+	 * Update scratch for MCS smart prefetch:
+	 * Starting with IPA 4.5, smart prefetch is implemented by H/W.
+	 * On IPA 4.0/4.1/4.2 we do not use MCS smart prefetch,
+	 * so keep the fields zero.
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
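+		/*
+		 * Size the outstanding-TRE budget to the endpoint's TLV
+		 * entry count times the 16 B ring-element size.
+		 */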
+		ch_scratch.gpi.max_outstanding_tre =
+			gsi_ep_info->ipa_if_tlv * GSI_CHAN_RE_SIZE_16B;
+		ch_scratch.gpi.outstanding_threshold =
+			2 * GSI_CHAN_RE_SIZE_16B;
+	}
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ch_scratch.gpi.dl_nlo_channel = 0;
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write scratch %d\n", result);
+		goto fail_write_channel_scratch;
+	}
+	return 0;
+
+fail_write_channel_scratch:
+	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
+		!= GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to dealloc GSI chan.\n");
+		WARN_ON(1);
+	}
+fail_alloc_channel:
+	dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.chan_ring_len,
+			ep->gsi_mem_info.chan_ring_base_vaddr,
+			ep->gsi_mem_info.chan_ring_base_addr);
+fail_alloc_channel_ring:
+fail_get_gsi_ep_info:
+	if (ep->gsi_evt_ring_hdl != ~0) {
+		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = ~0;
+	}
+	return result;
+}
+
+static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep)
+{
+	u32 ring_size;
+	int result;
+
+	ring_size = 2 * in->desc_fifo_sz;
+
+	/* copy event ring handle */
+	ep->gsi_evt_ring_hdl = coal_ep->gsi_evt_ring_hdl;
+
+	result = ipa_gsi_setup_transfer_ring(ep, ring_size,
+		coal_ep->sys, GFP_ATOMIC);
+	if (result) {
+		if (ep->gsi_mem_info.evt_ring_base_vaddr)
+			dma_free_coherent(ipa3_ctx->pdev,
+					ep->gsi_mem_info.chan_ring_len,
+					ep->gsi_mem_info.chan_ring_base_vaddr,
+					ep->gsi_mem_info.chan_ring_base_addr);
+		IPAERR("Destroying WAN_COAL_CONS evt_ring");
+		if (ep->gsi_evt_ring_hdl != ~0) {
+			gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+			ep->gsi_evt_ring_hdl = ~0;
+		}
+		IPAERR("Return with err: %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret)
+{
+	struct ipahal_imm_cmd_pyld *tag_pyld;
+	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
+
+	/* populate the tag field only if the descriptor payload is NULL */
+	if (desc->pyld == NULL) {
+		tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
+		tag_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
+		if (unlikely(!tag_pyld)) {
+			IPAERR("Failed to construct ip_packet_tag_status\n");
+			return -EFAULT;
+		}
+		/*
+		 * This is for a 32-bit pointer; special handling will be
+		 * needed if a 64-bit pointer is used
+		 */
+		IPADBG_LOW("tx_pkt sent in tag: 0x%pK\n", tx_pkt);
+		desc->pyld = tag_pyld->data;
+		desc->opcode = tag_pyld->opcode;
+		desc->len = tag_pyld->len;
+		desc->user1 = tag_pyld;
+		desc->type = IPA_IMM_CMD_DESC;
+		desc->callback = ipa3_tag_destroy_imm;
+
+		*tag_pyld_ret = tag_pyld;
+	}
+	return 0;
+}
+
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+		struct gsi_chan_xfer_notify *notify)
+{
+	int unused_var;
+
+	return ipa_poll_gsi_n_pkt(sys, notify, 1, &unused_var);
+}
+
+
+static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
+		struct gsi_chan_xfer_notify *notify,
+		int expected_num, int *actual_num)
+{
+	int ret;
+	int idx = 0;
+	int poll_num = 0;
+
+	if (!actual_num || expected_num <= 0 ||
+		expected_num > IPA_WAN_NAPI_MAX_FRAMES) {
+		IPAERR("bad params actual_num=%pK expected_num=%d\n",
+			actual_num, expected_num);
+		return GSI_STATUS_INVALID_PARAMS;
+	}
+
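+	/*
+	 * Consume a transfer notification that was saved earlier on this
+	 * endpoint, if any, before polling the channel for new ones.
+	 */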
+	if (sys->ep->xfer_notify_valid) {
+		*notify = sys->ep->xfer_notify;
+		sys->ep->xfer_notify_valid = false;
+		idx++;
+	}
+	if (expected_num == idx) {
+		*actual_num = idx;
+		return GSI_STATUS_SUCCESS;
+	}
+
+	ret = gsi_poll_n_channel(sys->ep->gsi_chan_hdl,
+		&notify[idx], expected_num - idx, &poll_num);
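+	/*
+	 * If the channel is empty or polling fails but a saved event was
+	 * already consumed above, still report success for that event.
+	 */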
+	if (ret == GSI_STATUS_POLL_EMPTY) {
+		if (idx) {
+			*actual_num = idx;
+			return GSI_STATUS_SUCCESS;
+		}
+		*actual_num = 0;
+		return ret;
+	} else if (ret != GSI_STATUS_SUCCESS) {
+		if (idx) {
+			*actual_num = idx;
+			return GSI_STATUS_SUCCESS;
+		}
+		*actual_num = 0;
+		IPAERR("Poll channel err: %d\n", ret);
+		return ret;
+	}
+
+	*actual_num = idx + poll_num;
+	return ret;
+}
+/**
+ * ipa3_lan_rx_poll() - Poll the LAN rx packets from IPA HW.
+ * This function is executed in the softirq context.
+ *
+ * If the input budget is zero, the driver switches back to
+ * interrupt mode.
+ *
+ * Return: the number of polled packets; 0 (zero) on error
+ */
+int ipa3_lan_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa3_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	int remain_aggr_weight;
+	struct gsi_chan_xfer_notify notify;
+
+	if (unlikely(clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0)) {
+		IPAERR("bad param 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+	remain_aggr_weight = weight / IPA_LAN_AGGR_PKT_CNT;
+	if (unlikely(remain_aggr_weight > IPA_LAN_NAPI_MAX_FRAMES)) {
+		IPAERR("NAPI weight is higher than expected\n");
+		IPAERR("expected %d got %d\n",
+			IPA_LAN_NAPI_MAX_FRAMES, remain_aggr_weight);
+		return cnt;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+start_poll:
+	while (remain_aggr_weight > 0 &&
+			atomic_read(&ep->sys->curr_polling_state)) {
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		ret = ipa_poll_gsi_pkt(ep->sys, &notify);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+			ipa3_dma_memcpy_notify(ep->sys);
+		else if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+			ipa3_wlan_wq_rx_common(ep->sys, &notify);
+		else
+			ipa3_wq_rx_common(ep->sys, &notify);
+
+		remain_aggr_weight--;
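+		/*
+		 * No pending data left: stop polling early. When the
+		 * budget is exactly exhausted, decrement cnt so that the
+		 * completion path below still runs.
+		 */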
+		if (ep->sys->len == 0) {
+			if (remain_aggr_weight == 0)
+				cnt--;
+			break;
+		}
+	}
+	cnt += weight - remain_aggr_weight * IPA_LAN_AGGR_PKT_CNT;
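+	/*
+	 * Budget not exhausted: complete NAPI and switch back to
+	 * interrupt mode. If an IRQ is already pending, resume polling
+	 * by rescheduling NAPI instead of losing the event.
+	 */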
+	if (cnt < weight) {
+		napi_complete(ep->sys->napi_obj);
+		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
+		if (ret == -GSI_STATUS_PENDING_IRQ &&
+				napi_reschedule(ep->sys->napi_obj))
+			goto start_poll;
+
+		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the WAN rx packets from IPA HW. This
+ * function is executed in the softirq context.
+ *
+ * If the input budget is zero, the driver switches back to
+ * interrupt mode.
+ *
+ * Return: the number of polled packets; 0 (zero) on error
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa3_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	int num = 0;
+	int remain_aggr_weight;
+	struct ipa_active_client_logging_info log;
+	struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+
+	remain_aggr_weight = weight / IPA_WAN_AGGR_PKT_CNT;
+
+	if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
+		IPAERR("NAPI weight is higher than expected\n");
+		IPAERR("expected %d got %d\n",
+			IPA_WAN_NAPI_MAX_FRAMES, remain_aggr_weight);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+start_poll:
+	while (remain_aggr_weight > 0 &&
+			atomic_read(&ep->sys->curr_polling_state)) {
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (ipa3_ctx->enable_napi_chain) {
+			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
+				remain_aggr_weight, &num);
+		} else {
+			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
+				1, &num);
+		}
+		if (ret)
+			break;
+
+		trace_ipa3_rx_poll_num(num);
+		ipa3_rx_napi_chain(ep->sys, notify, num);
+		remain_aggr_weight -= num;
+
+		trace_ipa3_rx_poll_cnt(ep->sys->len);
+		if (ep->sys->len == 0) {
+			if (remain_aggr_weight == 0)
+				cnt--;
+			break;
+		}
+	}
+	cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT;
+	/* call repl_hdlr before napi_reschedule / napi_complete */
+	ep->sys->repl_hdlr(ep->sys);
+
+	/* When not enough descriptors could be replenished, keep the
+	 * pipe polling until the minimum number of descriptors has
+	 * been replenished.
+	 */
+	if (cnt < weight && ep->sys->len > IPA_DEFAULT_SYS_YELLOW_WM) {
+		napi_complete(ep->sys->napi_obj);
+		ret = ipa3_rx_switch_to_intr_mode(ep->sys);
+		if (ret == -GSI_STATUS_PENDING_IRQ &&
+				napi_reschedule(ep->sys->napi_obj))
+			goto start_poll;
+		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+	} else {
+		cnt = weight;
+		IPADBG_LOW("Client = %d not replenished free descripotrs\n",
+				ep->client);
+	}
+	return cnt;
+}
+
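+/*
+ * Tag <-> pointer workaround: the HW tag field is only 48 bits wide,
+ * while 64-bit kernel pointers carry an all-ones 0xFFFF prefix in the
+ * top 16 bits. The two helpers below strip that constant prefix before
+ * handing the pointer to HW and restore it on the way back.
+ */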
+static unsigned long tag_to_pointer_wa(uint64_t tag)
+{
+	return 0xFFFF000000000000 | (unsigned long) tag;
+}
+
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	u16 temp;
+	/* Perform this check even though it may have a throughput cost */
+	if (BITS_PER_LONG == 64) {
+		temp = (u16) (~((unsigned long) tx_pkt &
+			0xFFFF000000000000) >> 48);
+		if (temp) {
+			IPAERR("The 16 prefix is not all 1s (%pK)\n",
+			tx_pkt);
+			/*
+			 * We need all addresses starting at 0xFFFF to
+			 * pass it to HW.
+			 */
+			ipa_assert();
+		}
+	}
+	return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
+}
+
+/**
+ * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
+ *
+ * A hardware limitation requires avoiding the use of GSI physical channel 20.
+ * This function allocates GSI physical channel 20 and holds it to prevent
+ * others from using it.
+ *
+ * Return codes: 0 on success, negative on failure
+ */
+int ipa_gsi_ch20_wa(void)
+{
+	struct gsi_chan_props gsi_channel_props;
+	dma_addr_t dma_addr;
+	int result;
+	int i;
+	unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
+	unsigned long chan_hdl_to_keep;
+
+
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	gsi_channel_props.evt_ring_hdl = ~0;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+		&dma_addr, 0);
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_channel_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	gsi_channel_props.db_in_bytes = 1;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+
+	/* first allocate channels up to channel 20 */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		gsi_channel_props.ch_id = i;
+		result = gsi_alloc_channel(&gsi_channel_props,
+			ipa3_ctx->gsi_dev_hdl,
+			&chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to alloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* allocate channel 20 */
+	gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&chan_hdl_to_keep);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to alloc channel %d err %d\n",
+			i, result);
+		return result;
+	}
+
+	/* release all other channels */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		result = gsi_dealloc_channel(chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to dealloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* DMA memory shall not be freed as it is used by channel 20 */
+	return 0;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz()
+ *
+ * Return value: the largest power of two that is smaller than the
+ * input value after it has been padded with IPA_MTU and
+ * IPA_GENERIC_RX_BUFF_LIMIT
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
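+	/*
+	 * Pad the limit, then use the classic bit-smearing trick: spread
+	 * the highest set bit into all lower positions, increment to the
+	 * next power of two, and halve it. E.g. a padded limit of 0x5000
+	 * smears to 0x7FFF, increments to 0x8000, and 0x4000 is returned,
+	 * the largest power of two below 0x5000.
+	 */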
+	aggr_byte_limit += IPA_MTU;
+	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
+	aggr_byte_limit--;
+	aggr_byte_limit |= aggr_byte_limit >> 1;
+	aggr_byte_limit |= aggr_byte_limit >> 2;
+	aggr_byte_limit |= aggr_byte_limit >> 4;
+	aggr_byte_limit |= aggr_byte_limit >> 8;
+	aggr_byte_limit |= aggr_byte_limit >> 16;
+	aggr_byte_limit++;
+	return aggr_byte_limit >> 1;
+}

+ 874 - 0
ipa/ipa_v3/ipa_dt_replacement.c

@@ -0,0 +1,874 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/msm_ipa.h>
+#include "ipa_i.h"
+#include "ipa_emulation_stubs.h"
+
+# undef strsame
+# define strsame(x, y) \
+	(!strcmp((x), (y)))
+
+/*
+ * The following enum values are used to index the tables below.
+ */
+enum dtsi_index_e {
+	DTSI_INDEX_3_5_1 = 0,
+	DTSI_INDEX_4_0   = 1,
+	DTSI_INDEX_4_5   = 2,
+};
+
+struct dtsi_replacement_u32 {
+	char *key;
+	u32 value;
+};
+
+struct dtsi_replacement_u32_table {
+	struct dtsi_replacement_u32 *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_bool {
+	char *key;
+	bool value;
+};
+
+struct dtsi_replacement_bool_table {
+	struct dtsi_replacement_bool *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_u32_array {
+	char *key;
+	u32 *p_value;
+	u32 num_elements;
+};
+
+struct dtsi_replacement_u32_array_table {
+	struct dtsi_replacement_u32_array *p_table;
+	u32 num_entries;
+};
+
+struct dtsi_replacement_resource_table {
+	struct resource *p_table;
+	u32 num_entries;
+};
+
+/*
+ * Any of the data below with _4_5 in the name represent data taken
+ * from the 4.5 dtsi file.
+ *
+ * Any of the data below with _4_0 in the name represent data taken
+ * from the 4.0 dtsi file.
+ *
+ * Any of the data below with _3_5_1 in the name represent data taken
+ * from the 3.5.1 dtsi file.
+ */
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_5[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       false},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         true},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          false},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     true},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
+};
+
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         false},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          true},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
+};
+
+static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
+	{"qcom,use-ipa-tethering-bridge",       true},
+	{"qcom,modem-cfg-emb-pipe-flt",         true},
+	{"qcom,ipa-wdi2",                       true},
+	{"qcom,use-64-bit-dma-mask",            false},
+	{"qcom,bandwidth-vote-for-ipa",         true},
+	{"qcom,skip-uc-pipe-reset",             false},
+	{"qcom,tethered-flow-control",          false},
+	{"qcom,use-rg10-limitation-mitigation", false},
+	{"qcom,do-not-use-ch-gsi-20",           false},
+	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
+};
+
+static struct dtsi_replacement_bool_table
+ipa3_plat_drv_bool_table[] = {
+	{ ipa3_plat_drv_bool_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) },
+	{ ipa3_plat_drv_bool_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_4_0) },
+	{ ipa3_plat_drv_bool_4_5,
+	  ARRAY_SIZE(ipa3_plat_drv_bool_4_5) },
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_5[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v4_5},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"qcom,msm-bus,num-cases",              5},
+	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v4_0},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
+};
+
+static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
+	{"qcom,ipa-hw-ver",                     IPA_HW_v3_5_1},
+	{"qcom,ipa-hw-mode",                    3},
+	{"qcom,wan-rx-ring-size",               192},
+	{"qcom,lan-rx-ring-size",               192},
+	{"qcom,ee",                             0},
+	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
+};
+
+static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
+	{ ipa3_plat_drv_u32_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) },
+	{ ipa3_plat_drv_u32_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_4_0) },
+	{ ipa3_plat_drv_u32_4_5,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_4_5) },
+};
+
+static u32 mhi_event_ring_id_limits_array_4_5[] = {
+	9, 10
+};
+
+static u32 mhi_event_ring_id_limits_array_4_0[] = {
+	9, 10
+};
+
+static u32 mhi_event_ring_id_limits_array_3_5_1[] = {
+	IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END
+};
+
+static u32 ipa_tz_unlock_reg_array_4_5[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_throughput_thresh_array_4_5[] = {
+	310, 600, 1000
+};
+
+static u32 ipa_tz_unlock_reg_array_4_0[] = {
+	0x04043583c, 0x00001000
+};
+
+static u32 ipa_tz_unlock_reg_array_3_5_1[] = {
+	0x04043583c, 0x00001000
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_5[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_4_5,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_4_5) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_4_5,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_4_5) },
+	{"qcom,throughput-threshold",
+	 ipa_throughput_thresh_array_4_5,
+	 ARRAY_SIZE(ipa_throughput_thresh_array_4_5) },
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_4_0,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_4_0,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) },
+};
+
+struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = {
+	{"qcom,mhi-event-ring-id-limits",
+	 mhi_event_ring_id_limits_array_3_5_1,
+	 ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) },
+	{"qcom,ipa-tz-unlock-reg",
+	 ipa_tz_unlock_reg_array_3_5_1,
+	 ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) },
+};
+
+struct dtsi_replacement_u32_array_table
+ipa3_plat_drv_u32_array_table[] = {
+	{ ipa3_plat_drv_u32_array_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) },
+	{ ipa3_plat_drv_u32_array_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) },
+	{ ipa3_plat_drv_u32_array_4_5,
+	  ARRAY_SIZE(ipa3_plat_drv_u32_array_4_5) },
+};
+
+#define INTCTRL_OFFSET       0x083C0000
+#define INTCTRL_SIZE         0x00000110
+
+#define IPA_BASE_OFFSET_4_5  0x01e00000
+#define IPA_BASE_SIZE_4_5    0x000c0000
+#define GSI_BASE_OFFSET_4_5  0x01e04000
+#define GSI_BASE_SIZE_4_5    0x00023000
+
+struct resource ipa3_plat_drv_resource_4_5[] = {
+	/*
+	 * PLEASE NOTE: The following offset values ("ipa-base",
+	 * "gsi-base", and "intctrl-base") are used to calculate
+	 * offsets relative to the PCI BAR0 address provided by the
+	 * PCI probe.  After their use to calculate the offsets, they
+	 * are not used again, since PCI ultimately dictates where
+	 * things live.
+	 */
+	{
+		IPA_BASE_OFFSET_4_5,
+		(IPA_BASE_OFFSET_4_5 + IPA_BASE_SIZE_4_5),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_4_5,
+		(GSI_BASE_OFFSET_4_5 + GSI_BASE_SIZE_4_5),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+#define IPA_BASE_OFFSET_4_0  0x01e00000
+#define IPA_BASE_SIZE_4_0    0x00034000
+#define GSI_BASE_OFFSET_4_0  0x01e04000
+#define GSI_BASE_SIZE_4_0    0x00028000
+
+struct resource ipa3_plat_drv_resource_4_0[] = {
+	/*
+	 * PLEASE NOTE: The following offset values ("ipa-base",
+	 * "gsi-base", and "intctrl-base") are used to calculate
+	 * offsets relative to the PCI BAR0 address provided by the
+	 * PCI probe.  After their use to calculate the offsets, they
+	 * are not used again, since PCI ultimately dictates where
+	 * things live.
+	 */
+	{
+		IPA_BASE_OFFSET_4_0,
+		(IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_4_0,
+		(GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+#define IPA_BASE_OFFSET_3_5_1  0x01e00000
+#define IPA_BASE_SIZE_3_5_1    0x00034000
+#define GSI_BASE_OFFSET_3_5_1  0x01e04000
+#define GSI_BASE_SIZE_3_5_1    0x0002c000
+
+struct resource ipa3_plat_drv_resource_3_5_1[] = {
+	/*
+	 * PLEASE NOTE: The following offset values ("ipa-base",
+	 * "gsi-base", and "intctrl-base") are used to calculate
+	 * offsets relative to the PCI BAR0 address provided by the
+	 * PCI probe.  After their use to calculate the offsets, they
+	 * are not used again, since PCI ultimately dictates where
+	 * things live.
+	 */
+	{
+		IPA_BASE_OFFSET_3_5_1,
+		(IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1),
+		"ipa-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		GSI_BASE_OFFSET_3_5_1,
+		(GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1),
+		"gsi-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	/*
+	 * The following entry is germane only to the emulator
+	 * environment.  It is needed to locate the emulator's PCI
+	 * interrupt controller...
+	 */
+	{
+		INTCTRL_OFFSET,
+		(INTCTRL_OFFSET + INTCTRL_SIZE),
+		"intctrl-base",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		IPA_PIPE_MEM_START_OFST,
+		(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
+		"ipa-pipe-mem",
+		IORESOURCE_MEM,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"gsi-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+
+	{
+		0,
+		0,
+		"ipa-irq",
+		IORESOURCE_IRQ,
+		0,
+		NULL,
+		NULL,
+		NULL
+	},
+};
+
+struct dtsi_replacement_resource_table
+ipa3_plat_drv_resource_table[] = {
+	{ ipa3_plat_drv_resource_3_5_1,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) },
+	{ ipa3_plat_drv_resource_4_0,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_4_0) },
+	{ ipa3_plat_drv_resource_4_5,
+	  ARRAY_SIZE(ipa3_plat_drv_resource_4_5) },
+};
+
+/*
+ * The following code uses the data above...
+ */
+static u32 emulator_type_to_index(void)
+{
+	/*
+	 * Use the input parameter to the IPA driver loadable module,
+	 * which specifies the type of hardware the driver is running
+	 * on.
+	 */
+	u32 index = DTSI_INDEX_4_0;
+	uint emulation_type = ipa3_get_emulation_type();
+
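+	/*
+	 * Hardware types not matched below keep the IPA 4.0 tables as
+	 * the default.
+	 */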
+	switch (emulation_type) {
+	case IPA_HW_v3_5_1:
+		index = DTSI_INDEX_3_5_1;
+		break;
+	case IPA_HW_v4_0:
+		index = DTSI_INDEX_4_0;
+		break;
+	case IPA_HW_v4_5:
+		index = DTSI_INDEX_4_5;
+		break;
+	default:
+		break;
+	}
+
+	IPADBG("emulation_type(%u) emulation_index(%u)\n",
+	       emulation_type, index);
+
+	return index;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_bool - Find a property
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_bool *ipa3_plat_drv_boolP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_bool_table for index %u\n",
+		    index);
+		return false;
+	}
+
+	ipa3_plat_drv_boolP =
+	    ipa3_plat_drv_bool_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_bool_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_boolP[i].key, propname)) {
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_boolP[i].value,
+			    propname,
+			    index);
+			return ipa3_plat_drv_boolP[i].value;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return false;
+}
+
+/* From include/linux/of.h */
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32 *ipa3_plat_drv_u32P;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_table for index %u\n",
+		    index);
+		return -EINVAL;
+	}
+
+	ipa3_plat_drv_u32P =
+	    ipa3_plat_drv_u32_table[index].p_table;
+
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_table[index].num_entries;
+	     i++) {
+		if (strsame(ipa3_plat_drv_u32P[i].key, propname)) {
+			*out_value = ipa3_plat_drv_u32P[i].value;
+			IPADBG(
+			    "Found value %u for propname %s index %u\n",
+			    ipa3_plat_drv_u32P[i].value,
+			    propname,
+			    index);
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From include/linux/of.h */
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node from which the property value is to be read.
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz)
+{
+	u16 i;
+	u32 index;
+	struct dtsi_replacement_u32_array *u32_arrayP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+		    index);
+		return -EINVAL;
+	}
+
+	u32_arrayP =
+		ipa3_plat_drv_u32_array_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+	     i++) {
+		if (strsame(
+			u32_arrayP[i].key, propname)) {
+			u32 num_elements =
+			    u32_arrayP[i].num_elements;
+			u32 *p_element =
+			    &u32_arrayP[i].p_value[0];
+			size_t j = 0;
+
+			if (num_elements > sz) {
+				IPAERR(
+				    "Found array of %u values for propname %s; only room for %u elements in copy buffer\n",
+				    num_elements,
+				    propname,
+				    (unsigned int) sz);
+				return -EOVERFLOW;
+			}
+
+			while (j++ < num_elements)
+				*out_values++ = *p_element++;
+
+			IPADBG(
+			    "Found array of values starting with %u for propname %s index %u\n",
+			    u32_arrayP[i].p_value[0],
+			    propname,
+			    index);
+
+			return 0;
+		}
+	}
+
+	IPADBG("Did not find match for propname %s index %u\n",
+	       propname,
+	       index);
+
+	return -EINVAL;
+}
+
+/* From drivers/base/platform.c */
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name)
+{
+	u16 i;
+	u32 index;
+	struct resource *ipa3_plat_drv_resourceP;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+	if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) {
+		IPADBG(
+		    "Did not find ipa3_plat_drv_resource_table for index %u\n",
+		    index);
+		return NULL;
+	}
+
+	ipa3_plat_drv_resourceP =
+		ipa3_plat_drv_resource_table[index].p_table;
+	for (i = 0;
+	     i < ipa3_plat_drv_resource_table[index].num_entries;
+	     i++) {
+		struct resource *r = &ipa3_plat_drv_resourceP[i];
+
+		if (type == resource_type(r) && strsame(r->name, name)) {
+			IPADBG(
+			    "Found start 0x%x size %u for name %s index %u\n",
+			    (unsigned int) (r->start),
+			    (unsigned int) (resource_size(r)),
+			    name,
+			    index);
+			return r;
+		}
+	}
+
+	IPADBG("Did not find match for name %s index %u\n",
+	       name,
+	       index);
+
+	return NULL;
+}
+
+/* From drivers/of/base.c */
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node from which the property value is to
+ *              be read. Not used.
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size)
+{
+	u32 index;
+
+	/*
+	 * Get the index for the type of hardware we're running on.
+	 * This is used as a table index.
+	 */
+	index = emulator_type_to_index();
+
+	/*
+	 * Use elem_size to determine which table to search for the
+	 * specified property name
+	 */
+	if (elem_size == sizeof(u32)) {
+		u16 i;
+		struct dtsi_replacement_u32_array *u32_arrayP;
+
+		if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
+			IPADBG(
+			    "Did not find ipa3_plat_drv_u32_array_table for index %u\n",
+			    index);
+			return -EINVAL;
+		}
+
+		u32_arrayP =
+			ipa3_plat_drv_u32_array_table[index].p_table;
+
+		for (i = 0;
+		     i < ipa3_plat_drv_u32_array_table[index].num_entries;
+		     i++) {
+			if (strsame(u32_arrayP[i].key, propname)) {
+				if (u32_arrayP[i].p_value == NULL) {
+					IPADBG(
+					    "Found no elements for propname %s index %u\n",
+					    propname,
+					    index);
+					return -ENODATA;
+				}
+
+				IPADBG(
+				    "Found %u elements for propname %s index %u\n",
+				    u32_arrayP[i].num_elements,
+				    propname,
+				    index);
+
+				return u32_arrayP[i].num_elements;
+			}
+		}
+
+		IPADBG(
+		    "Found no match in table with elem_size %d for propname %s index %u\n",
+		    elem_size,
+		    propname,
+		    index);
+
+		return -EINVAL;
+	}
+
+	IPAERR(
+	    "Found no tables with element size %u to search for propname %s index %u\n",
+	    elem_size,
+	    propname,
+	    index);
+
+	return -EINVAL;
+}
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max)
+{
+	return emulator_of_property_read_u32_array(
+	    np, propname, out_values, sz_max);
+}
+
+resource_size_t emulator_resource_size(const struct resource *res)
+{
+	return resource_size(res);
+}

+ 121 - 0
ipa/ipa_v3/ipa_emulation_stubs.h

@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_IPA_EMULATION_STUBS_H_)
+# define _IPA_EMULATION_STUBS_H_
+
+# define outer_flush_range(x, y)
+# define __flush_dcache_area(x, y)
+# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y)
+
+/* Point several API calls to these new EMULATION functions */
+# define of_property_read_bool(np, propname)	 \
+	emulator_of_property_read_bool(NULL, propname)
+# define of_property_read_u32(np, propname, out_value)   \
+	emulator_of_property_read_u32(NULL, propname, out_value)
+# define of_property_read_u32_array(np, propname, out_values, sz)	\
+	emulator_of_property_read_u32_array(NULL, propname, out_values, sz)
+# define platform_get_resource_byname(dev, type, name) \
+	emulator_platform_get_resource_byname(NULL, type, name)
+# define of_property_count_elems_of_size(np, propname, elem_size) \
+	emulator_of_property_count_elems_of_size(NULL, propname, elem_size)
+# define of_property_read_variable_u32_array( \
+	np, propname, out_values, sz_min, sz_max) \
+	emulator_of_property_read_variable_u32_array( \
+		NULL, propname, out_values, sz_min, sz_max)
+# define resource_size(res) \
+	emulator_resource_size(res)
+
+/**
+ * emulator_of_property_read_bool - Find a property
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+bool emulator_of_property_read_bool(
+	const struct device_node *np,
+	const char *propname);
+
+int emulator_of_property_read_u32(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_value);
+
+/**
+ * emulator_of_property_read_u32_array - Find and read an array of 32
+ * bit integers from a property.
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz:         number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int emulator_of_property_read_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz);
+
+/**
+ * emulator_platform_get_resource_byname - get a resource for a device
+ * by name
+ *
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *emulator_platform_get_resource_byname(
+	struct platform_device *dev,
+	unsigned int type,
+	const char *name);
+
+/**
+ * emulator_of_property_count_elems_of_size - Count the number of
+ * elements in a property
+ *
+ * @np:         device node used to find the property value. (not used)
+ * @propname:   name of the property to be searched.
+ * @elem_size:  size of the individual element
+ *
+ * Search for a property and count the number of elements of size
+ * elem_size in it. Returns number of elements on success, -EINVAL if
+ * the property does not exist or its length does not match a multiple
+ * of elem_size and -ENODATA if the property does not have a value.
+ */
+int emulator_of_property_count_elems_of_size(
+	const struct device_node *np,
+	const char *propname,
+	int elem_size);
+
+int emulator_of_property_read_variable_u32_array(
+	const struct device_node *np,
+	const char *propname,
+	u32 *out_values,
+	size_t sz_min,
+	size_t sz_max);
+
+resource_size_t emulator_resource_size(
+	const struct resource *res);
+
+static inline bool is_device_dma_coherent(struct device *dev)
+{
+	return false;
+}
+
+static inline phys_addr_t qcom_smem_virt_to_phys(void *addr)
+{
+	return 0;
+}
+
+#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */

+ 2137 - 0
ipa/ipa_v3/ipa_flt.c

@@ -0,0 +1,2137 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED		(-1)
+
+#define IPA_FLT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *		the size of the rule as seen by HW, so no valid buffer was
+ *		passed and a scratch buffer is used instead.
+ *		With this scheme the rule is generated twice: once to learn
+ *		its size using the scratch buffer, and a second time to write
+ *		the rule into the caller-supplied buffer of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * The caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa3_flt_entry *entry, u8 *buf)
+{
+	struct ipahal_flt_rule_gen_params gen_params;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	if (entry->rule.hashable) {
+		if (entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK
+			&& !entry->rule.eq_attrib_type) {
+			IPAERR_RL("PURE_ACK rule atrb used with hash rule\n");
+			WARN_ON_RATELIMIT_IPA(1);
+			return -EPERM;
+		}
+		/*
+		 * The tos_eq_present field has two meanings:
+		 * the tos equation for IPA ver < 4.5 (as the field name
+		 * suggests), and the pure_ack equation for IPA ver >= 4.5.
+		 */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+			entry->rule.eq_attrib_type &&
+			entry->rule.eq_attrib.tos_eq_present) {
+			IPAERR_RL("PURE_ACK rule eq used with hash rule\n");
+			return -EPERM;
+		}
+	}
+
+	gen_params.ipt = ip;
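+	/* Use the bound routing table's index while the table is still
+	 * live; otherwise fall back to the index recorded in the rule.
+	 */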
+	if (entry->rt_tbl && (!ipa3_check_idr_if_freed(entry->rt_tbl)))
+		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
+	else
+		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_flt_rule_i *)&entry->rule;
+	gen_params.cnt_idx = entry->cnt_idx;
+
+	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res) {
+		IPAERR_RL("failed to generate flt h/w rule\n");
+		return res;
+	}
+
+	return 0;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
+{
+	struct ipa3_flt_tbl *tbl;
+	int i;
+
+	IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem[rlt].phys_base) {
+			IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
+			ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem[rlt].phys_base) {
+				IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
+					i);
+				ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
+			}
+		}
+	}
+}
+
+/**
+ * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
+ *  assign priorities to the rules, calculate their sizes and calculate
+ *  the overall table size
+ * @ip: the ip address family type
+ * @tbl: the flt tbl to be prepared
+ * @pipe_idx: the ep pipe appropriate for the given tbl
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_flt_tbl *tbl, int pipe_idx)
+{
+	struct ipa3_flt_entry *entry;
+	int prio_i;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
+	prio_i = max_prio;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to calculate HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
+			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
+			pipe_idx);
+		return 0;
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	/* for the header word */
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
+
+/**
+ * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
+ *  (rules and tables) to HW format and fill it in the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
+{
+	u64 offset;
+	u8 *body_i;
+	int res;
+	struct ipa3_flt_entry *entry;
+	u8 *tbl_mem_buf;
+	struct ipa_mem_buffer tbl_mem;
+	struct ipa3_flt_tbl *tbl;
+	int i;
+	int hdr_idx = 0;
+
+	body_i = base;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->sz[rlt] == 0) {
+			hdr_idx++;
+			continue;
+		}
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, hdr_idx, true)) {
+				IPAERR("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, tbl_mem_buf);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				hdr_idx, true)) {
+				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, body_i);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment, as
+			 * local tables are ordered back-to-back
+			 */
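+			/*
+			 * The helper presumably returns an alignment mask
+			 * (alignment - 1), so (addr + mask) & ~mask below
+			 * rounds body_i up to the next aligned address.
+			 */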
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+		hdr_idx++;
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
+ *  headers and bodies are being created into buffers that will be filled into
+ *  the local memory (sram)
+ * @ip: the ip address family type
+ * @alloc_params: In and Out parameters for the allocations of the buffers
+ *  4 buffers: hdr and bdy, each hashable and non-hashable
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	int rc = 0;
+
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
+			IPA_MEM_PART(v4_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
+			IPA_MEM_PART(v4_flt_hash_ofst);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
+			IPA_MEM_PART(v6_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
+			IPA_MEM_PART(v6_flt_hash_ofst);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR_RL("fail to allocate FLT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_failed;
+	}
+
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst)) {
+		IPAERR_RL("fail to translate hashable flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst)) {
+		IPAERR_RL("fail to translate non-hash flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_failed:
+	return rc;
+}
+
+/**
+ * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
+ * tbl bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ *
+ * Return: true if enough space available or false in other cases
+ */
+static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (!bdy) {
+		IPAERR("Bad parameters, bdy = NULL\n");
+		return false;
+	}
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_flt_hash_size) :
+			IPA_MEM_PART(apps_v4_flt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_flt_hash_size) :
+			IPA_MEM_PART(apps_v6_flt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+	       bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
+ *  payload pointers buffers for headers and bodies of flt structure
+ *  as well as place for flush imm.
+ * @ipt: the ip address family type
+ * @entries: the number of entries
+ * @desc: [OUT] descriptor buffer
+ * @cmd: [OUT] imm commands payload pointers buffer
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, u16 entries,
+	struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
+{
+	*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
+	if (*desc == NULL) {
+		IPAERR("fail to alloc desc blob ip %d\n", ip);
+		goto fail_desc_alloc;
+	}
+
+	*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
+	if (*cmd_pyld == NULL) {
+		IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
+		goto fail_cmd_alloc;
+	}
+
+	return 0;
+
+fail_cmd_alloc:
+	kfree(*desc);
+fail_desc_alloc:
+	return -ENOMEM;
+}
+
+/**
+ * ipa_flt_skip_pipe_config() - should ep flt configuration be skipped?
+ *  skips according to pre-configuration or modem-owned pipes
+ * @pipe: the EP pipe index
+ *
+ * Return: true if the pipe should be skipped, false otherwise
+ */
+static bool ipa_flt_skip_pipe_config(int pipe)
+{
+	struct ipa3_ep_context *ep;
+
+	if (ipa_is_modem_pipe(pipe)) {
+		IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
+		return true;
+	}
+
+	if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	ep = &ipa3_ctx->ep[pipe];
+
+	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
+		&& ipa3_ctx->modem_cfg_emb_pipe_flt)
+		&& ep->client == IPA_CLIENT_APPS_WAN_PROD) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * __ipa_commit_flt_v3() - commit flt tables to the hw
+ *  commit the headers, and the bodies if they are local, with internal
+ *  cache flushing.
+ *  The headers (and local bodies) will first be created into dma buffers and
+ *  then written via IC to the SRAM
+ * @ipt: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_flt_v3(enum ipa_ip_type ip)
+{
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	int rc = 0;
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int num_cmd = 0;
+	int i;
+	int hdr_idx;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	u32 tbl_hdr_width;
+	struct ipa3_flt_tbl *tbl;
+	u16 entries;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
+
+	if (ip == IPA_IP_v4) {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+	} else {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+	}
+
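+	/*
+	 * First pass: size every per-pipe table and accumulate totals for
+	 * the tables that will live in local (SRAM) memory, so the image
+	 * allocation below knows how much room is needed.
+	 */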
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+			rc = -EPERM;
+			goto prep_failed;
+		}
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
+		rc = -EFAULT;
+		goto prep_failed;
+	}
+
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	/* +4: 2 for the bodies (hashable and non-hashable), 1 for flushing
+	 * the hash cache and 1 for closing the coalescing frame
+	 */
+	entries = (ipa3_ctx->ep_flt_num) * 2 + 4;
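+	/*
+	 * e.g. with ep_flt_num == 10 (hypothetical value): 10 non-hashable
+	 * + 10 hashable header writes, 2 body writes, 1 hash flush and
+	 * 1 coal-frame close => 24 descriptor slots.
+	 */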
+
+	if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) {
+		rc = -ENOMEM;
+		goto fail_size_valid;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			rc = -ENOMEM;
+			goto fail_reg_write_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	/*
+	 * No SRAM memory is allocated to hash tables, so commands that
+	 * operate on the hash tables (filter/routing) are not supported.
+	 */
+	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
+		/* flushing ipa internal hashable flt rules cache */
+		memset(&flush, 0, sizeof(flush));
+		if (ip == IPA_IP_v4)
+			flush.v4_flt = true;
+		else
+			flush.v6_flt = true;
+		ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+		reg_write_cmd.skip_pipeline_clear = false;
+		reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_cmd.offset = ipahal_get_reg_ofst(
+					IPA_FILT_ROUT_HASH_FLUSH);
+		reg_write_cmd.value = valmask.val;
+		reg_write_cmd.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd,
+							false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+			"fail construct register_write imm cmd: IP %d\n", ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	hdr_idx = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i)) {
+			IPADBG_LOW("skip %d - not filtering pipe\n", i);
+			continue;
+		}
+
+		if (ipa_flt_skip_pipe_config(i)) {
+			hdr_idx++;
+			continue;
+		}
+
+		if (num_cmd + 1 >= entries) {
+			IPAERR("number of commands is out of range: IP = %d\n",
+				ip);
+			rc = -ENOBUFS;
+			goto fail_imm_cmd_construct;
+		}
+
+		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
+			hdr_idx, i);
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = tbl_hdr_width;
+		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
+			hdr_idx * tbl_hdr_width;
+		mem_cmd.local_addr = lcl_nhash_hdr +
+			hdr_idx * tbl_hdr_width;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -ENOMEM;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+
+		/*
+		 * No SRAM memory is allocated to hash tables, so commands
+		 * that operate on the hash tables (filter/routing) are not
+		 * supported.
+		 */
+		if (!ipa3_ctx->ipa_fltrt_not_hashable) {
+			mem_cmd.is_read = false;
+			mem_cmd.skip_pipeline_clear = false;
+			mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			mem_cmd.size = tbl_hdr_width;
+			mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
+				hdr_idx * tbl_hdr_width;
+			mem_cmd.local_addr = lcl_hash_hdr +
+				hdr_idx * tbl_hdr_width;
+			cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+					IPA_IMM_CMD_DMA_SHARED_MEM,
+						&mem_cmd, false);
+			if (!cmd_pyld[num_cmd]) {
+				IPAERR(
+				"fail construct dma_shared_mem cmd: IP = %d\n",
+						ip);
+				rc = -ENOMEM;
+				goto fail_imm_cmd_construct;
+			}
+			ipa3_init_imm_cmd_desc(&desc[num_cmd],
+						cmd_pyld[num_cmd]);
+			++num_cmd;
+		}
+		++hdr_idx;
+	}
+
+	if (lcl_nhash) {
+		if (num_cmd >= entries) {
+			IPAERR("number of commands is out of range: IP = %d\n",
+				ip);
+			rc = -ENOBUFS;
+			goto fail_imm_cmd_construct;
+		}
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -ENOMEM;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+	if (lcl_hash) {
+		if (num_cmd >= entries) {
+			IPAERR("number of commands is out of range: IP = %d\n",
+				ip);
+			rc = -ENOBUFS;
+			goto fail_imm_cmd_construct;
+		}
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -ENOMEM;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG_LOW("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG_LOW("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG_LOW("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG_LOW("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_reg_write_construct:
+	kfree(desc);
+	kfree(cmd_pyld);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+prep_failed:
+	return rc;
+}
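+/*
+ * Descriptor chain built above: [coal-frame close] -> [hash-cache
+ * flush] -> per filtering pipe: non-hashable header DMA [+ hashable
+ * header DMA] -> [non-hashable body DMA] -> [hashable body DMA].
+ * Bracketed items are conditional; all descriptors are sent in a
+ * single ipa3_send_cmd() call.
+ */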
+
+static int __ipa_validate_flt_rule(const struct ipa_flt_rule_i *rule,
+		struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
+{
+	int index;
+
+	if (rule->action != IPA_PASS_TO_EXCEPTION) {
+		if (!rule->eq_attrib_type) {
+			if (!rule->rt_tbl_hdl) {
+				IPAERR_RL("invalid RT tbl\n");
+				goto error;
+			}
+
+			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
+			if (*rt_tbl == NULL) {
+				IPAERR_RL("RT tbl not found\n");
+				goto error;
+			}
+
+			if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
+				IPAERR_RL("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR_RL("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	} else {
+		if (rule->rt_tbl_idx > 0) {
+			IPAERR_RL("invalid RT tbl\n");
+			goto error;
+		}
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (rule->pdn_idx) {
+			if (rule->action == IPA_PASS_TO_EXCEPTION ||
+				rule->action == IPA_PASS_TO_ROUTING) {
+				IPAERR_RL(
+					"PDN index should be 0 when action is not pass to NAT\n");
+				goto error;
+			} else {
+				if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
+					IPAERR_RL("PDN index %d is too large\n",
+						rule->pdn_idx);
+					goto error;
+				}
+			}
+		}
+	}
+
+	if (rule->rule_id) {
+		if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
+		(rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
+			IPAERR_RL("invalid rule_id provided 0x%x\n"
+				"rule_id with bit 0x%x are auto generated\n",
+				rule->rule_id, ipahal_get_rule_id_hi_bit());
+			goto error;
+		}
+	}
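+	/*
+	 * e.g. if ipahal_get_rule_id_hi_bit() returned 0x200 (hypothetical
+	 * value), caller-supplied rule IDs must fall in [0x200, 0x3FE];
+	 * IDs below 0x200 are reserved for allocation by the driver from
+	 * the idr.
+	 */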
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (rule->enable_stats && rule->cnt_idx) {
+			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu out of range\n",
+					rule->cnt_idx);
+				goto error;
+			}
+			index = rule->cnt_idx - 1;
+			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu not alloc by driver\n",
+					rule->cnt_idx);
+				goto error;
+			}
+		}
+	} else {
+		if (rule->enable_stats) {
+			IPAERR_RL(
+				"enable_stats won't support on ipa_hw_type %d\n",
+				ipa3_ctx->ipa_hw_type);
+			goto error;
+		}
+	}
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
+		const struct ipa_flt_rule_i *rule, struct ipa3_rt_tbl *rt_tbl,
+		struct ipa3_flt_tbl *tbl, bool user)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!*entry)
+		goto error;
+	INIT_LIST_HEAD(&((*entry)->link));
+	(*entry)->rule = *rule;
+	(*entry)->cookie = IPA_FLT_COOKIE;
+	(*entry)->rt_tbl = rt_tbl;
+	(*entry)->tbl = tbl;
+	if (rule->rule_id) {
+		id = rule->rule_id;
+	} else {
+		id = ipa3_alloc_rule_id(tbl->rule_ids);
+		if (id < 0) {
+			IPAERR_RL("failed to allocate rule id\n");
+			WARN_ON_RATELIMIT_IPA(1);
+			goto rule_id_fail;
+		}
+	}
+	(*entry)->rule_id = id;
+	(*entry)->ipacm_installed = user;
+	if (rule->enable_stats)
+		(*entry)->cnt_idx = rule->cnt_idx;
+	else
+		(*entry)->cnt_idx = 0;
+	return 0;
+
+rule_id_fail:
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
+		struct ipa3_flt_entry *entry, u32 *rule_hdl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR_RL("failed to add to tree\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		goto ipa_insert_failed;
+	}
+	*rule_hdl = id;
+	entry->id = id;
+	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	return 0;
+ipa_insert_failed:
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+	tbl->rule_cnt--;
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule_i *rule, u8 add_rear,
+			      u32 *rule_hdl, bool user)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user))
+		goto error;
+
+	if (add_rear) {
+		if (tbl->sticky_rear)
+			list_add_tail(&entry->link,
+					tbl->head_flt_rule_list.prev);
+		else
+			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	} else {
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	}
+
+	if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+		goto ipa_insert_failed;
+
+	return 0;
+ipa_insert_failed:
+	list_del(&entry->link);
+	/* if rule id was allocated from idr, remove it */
+	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+		(entry->rule_id >= ipahal_get_low_rule_id()))
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
+				const struct ipa_flt_rule_i *rule,
+				u32 *rule_hdl,
+				enum ipa_ip_type ip,
+				struct ipa3_flt_entry **add_after_entry)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (rule == NULL || rule_hdl == NULL) {
+		IPAERR_RL("bad parms rule=%pK rule_hdl=%pK\n", rule,
+				rule_hdl);
+		goto error;
+	}
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+		goto ipa_insert_failed;
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+ipa_insert_failed:
+	list_del(&entry->link);
+	/* if rule id was allocated from idr, remove it */
+	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+		(entry->rule_id >= ipahal_get_low_rule_id()))
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa3_flt_entry *entry;
+	int id;
+
+	entry = ipa3_id_find(rule_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("bad params\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
+		entry->tbl->rule_cnt, entry->rule_id);
+	entry->cookie = 0;
+	/* if rule id was allocated from idr, remove it */
+	if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+		(entry->rule_id >= ipahal_get_low_rule_id()))
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
+
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy_i *frule,
+		enum ipa_ip_type ip)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	entry = ipa3_id_find(frule->rule_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("bad params\n");
+		goto error;
+	}
+
+	if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip))
+		goto error;
+
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+
+	entry->rule = frule->rule;
+	entry->rt_tbl = rt_tbl;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	entry->hw_len = 0;
+	entry->prio = 0;
+	if (frule->rule.enable_stats)
+		entry->cnt_idx = frule->rule.cnt_idx;
+	else
+		entry->cnt_idx = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
+{
+	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
+	if (*ipa_ep_idx < 0) {
+		IPAERR_RL("ep not valid ep=%d\n", ep);
+		return -EINVAL;
+	}
+	if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
+		IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
+
+	if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
+		IPAERR("ep do not support filtering ep=%d\n", ep);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule_i *rule, u8 add_rear,
+				 u32 *rule_hdl, bool user)
+{
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+		IPAERR_RL("bad parms rule=%pK rule_hdl=%pK ep=%d\n", rule,
+				rule_hdl, ep);
+		return -EINVAL;
+	}
+
+	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
+		return -EINVAL;
+
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR_RL("invalid ipa_ep_idx=%d\n", ipa_ep_idx);
+		return -EINVAL;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
+}
+
+static void __ipa_convert_flt_rule_in(struct ipa_flt_rule rule_in,
+	struct ipa_flt_rule_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule) >
+			sizeof(struct ipa_flt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_i),
+			sizeof(struct ipa_flt_rule));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_i));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
+}
+
+static void __ipa_convert_flt_rule_out(struct ipa_flt_rule_i rule_in,
+	struct ipa_flt_rule *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule) >
+			sizeof(struct ipa_flt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_i),
+			sizeof(struct ipa_flt_rule));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_flt_rule));
+}
+
+static void __ipa_convert_flt_mdfy_in(struct ipa_flt_rule_mdfy rule_in,
+	struct ipa_flt_rule_mdfy_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
+			sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_mdfy),
+			sizeof(struct ipa_flt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy_i));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_flt_rule));
+	rule_out->rule_hdl = rule_in.rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
+static void __ipa_convert_flt_mdfy_out(struct ipa_flt_rule_mdfy_i rule_in,
+	struct ipa_flt_rule_mdfy *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_flt_rule_mdfy) >
+			sizeof(struct ipa_flt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_flt_rule_mdfy),
+			sizeof(struct ipa_flt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_flt_rule_mdfy));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_flt_rule));
+	rule_out->rule_hdl = rule_in.rule_hdl;
+	rule_out->status = rule_in.status;
+}
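+/*
+ * All four converters above rely on struct ipa_flt_rule being layout-
+ * compatible with a prefix of struct ipa_flt_rule_i: the public fields
+ * are memcpy'd as-is, the extended _i fields are zeroed on the way in
+ * and dropped on the way out (the size checks guard that assumption).
+ */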
+
+/**
+ * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	return ipa3_add_flt_rule_usr(rules, false);
+}
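+/*
+ * Illustrative caller sketch (hypothetical values; not a fixed usage
+ * pattern): add one IPv4 exception rule at the rear of the USB
+ * producer pipe and commit it.
+ *
+ *	struct ipa_ioc_add_flt_rule *rules;
+ *	int ret;
+ *
+ *	rules = kzalloc(sizeof(*rules) +
+ *		sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+ *	if (!rules)
+ *		return -ENOMEM;
+ *	rules->commit = 1;
+ *	rules->ip = IPA_IP_v4;
+ *	rules->ep = IPA_CLIENT_USB_PROD;
+ *	rules->num_rules = 1;
+ *	rules->rules[0].at_rear = 1;
+ *	rules->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
+ *	ret = ipa3_add_flt_rule(rules);
+ *	kfree(rules);
+ */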
+
+/**
+ * ipa3_add_flt_rule_v2() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules)
+{
+	return ipa3_add_flt_rule_usr_v2(rules, false);
+}
+
+
+/**
+ * ipa3_add_flt_rule_usr() - Add the specified filtering rules to
+ * SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
+{
+	int i;
+	int result;
+	struct ipa_flt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (!rules->global) {
+			/* if hashing is not supported, all table entries
+			 * are non-hashable
+			 */
+			if (ipa3_ctx->ipa_fltrt_not_hashable)
+				rules->rules[i].rule.hashable = false;
+			__ipa_convert_flt_rule_in(
+				rules->rules[i].rule, &rule);
+			result = __ipa_add_ep_flt_rule(rules->ip,
+					rules->ep,
+					&rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl,
+					user_only);
+			__ipa_convert_flt_rule_out(rule,
+				&rules->rules[i].rule);
+		} else
+			result = -1;
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->global) {
+		IPAERR_RL("no support for global filter rules\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_add_flt_rule_usr_v2() - Add the specified filtering
+ * rules to SW and optionally commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ * @user_only:	[in] indicate rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2
+	*rules, bool user_only)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (!rules->global) {
+			/* if hashing is not supported, all table entries
+			 * are non-hashable
+			 */
+			if (ipa3_ctx->ipa_fltrt_not_hashable)
+				((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].rule.hashable = false;
+			result = __ipa_add_ep_flt_rule(rules->ip,
+					rules->ep,
+					&(((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].rule),
+					((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_flt_rule_add_i *)
+					rules->rules)[i].flt_rule_hdl),
+					user_only);
+		} else
+			result = -1;
+
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->global) {
+		IPAERR_RL("no support for global filter rules\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
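+/*
+ * In the _v2 ioctl layout rules->rules carries the rule array as an
+ * opaque pointer-sized value rather than a typed flexible array, hence
+ * the casts to struct ipa_flt_rule_add_i * before each access. Unlike
+ * the v1 path, no ipa_flt_rule conversion is needed since the _i
+ * layout is used end to end.
+ */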
+
+/**
+ * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
+ *  the rule whose handle is given, and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
+{
+	int i;
+	int result;
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+	struct ipa3_flt_entry *entry;
+	struct ipa_flt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (rules->ep >= IPA_CLIENT_MAX) {
+		IPAERR_RL("bad parms ep=%d\n", rules->ep);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES || ipa_ep_idx < 0) {
+		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u flt hdl id = %d\n",
+			entry->cookie, rules->add_after_hdl);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given entry does not match the table\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->sticky_rear)
+		if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR_RL("cannot add rule at end of a sticky table");
+			result = -EINVAL;
+			goto bail;
+		}
+
+	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+			rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * Add the rules one after the other. If one insertion fails it cuts
+	 * the chain: all following rules receive a fail status, since the
+	 * subsequent calls to __ipa_add_flt_rule_after() fail (entry == NULL).
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing is not supported, all tables are non-hashable */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			rules->rules[i].rule.hashable = false;
+
+		__ipa_convert_flt_rule_in(
+				rules->rules[i].rule, &rule);
+
+		result = __ipa_add_flt_rule_after(tbl,
+				&rule,
+				&rules->rules[i].flt_rule_hdl,
+				rules->ip,
+				&entry);
+
+		__ipa_convert_flt_rule_out(rule,
+				&rules->rules[i].rule);
+
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			IPAERR("failed to commit flt rules\n");
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_add_flt_rule_after_v2() - Add the specified filtering
+ *  rules to SW after the rule whose handle is given, and
+ *  optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
+	*rules)
+{
+	int i;
+	int result;
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+	struct ipa3_flt_entry *entry;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (rules->ep >= IPA_CLIENT_MAX) {
+		IPAERR_RL("bad parms ep=%d\n", rules->ep);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx < 0) {
+		IPAERR_RL("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u flt hdl id = %d\n",
+			entry->cookie, rules->add_after_hdl);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given entry does not match the table\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->sticky_rear)
+		if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR_RL("cannot add rule at end of a sticky table");
+			result = -EINVAL;
+			goto bail;
+		}
+
+	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+			rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * Add the rules one after the other. If one insertion fails it cuts
+	 * the chain: all following rules receive a fail status, since the
+	 * subsequent calls to __ipa_add_flt_rule_after() fail (entry == NULL).
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing is not supported, all tables are non-hashable */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
+		result = __ipa_add_flt_rule_after(tbl,
+				&(((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].rule),
+				&(((struct ipa_flt_rule_add_i *)
+				rules->rules)[i].flt_rule_hdl),
+				rules->ip,
+				&entry);
+		if (result) {
+			IPAERR_RL("failed to add flt rule %d\n", i);
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_flt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			IPAERR("failed to commit flt rules\n");
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR_RL("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int i;
+	int result;
+	struct ipa_flt_rule_mdfy_i rule;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing is not supported, all tables are non-hashable */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			hdls->rules[i].rule.hashable = false;
+		__ipa_convert_flt_mdfy_in(hdls->rules[i], &rule);
+		if (__ipa_mdfy_flt_rule(&rule, hdls->ip)) {
+			IPAERR_RL("failed to mdfy flt rule %i\n", i);
+			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+			__ipa_convert_flt_mdfy_out(rule, &hdls->rules[i]);
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_flt_rule_v2() - Modify the specified filtering
+ * rules in SW and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing is not supported, all tables are non-hashable */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].rule.hashable = false;
+		if (__ipa_mdfy_flt_rule(&(((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i]), hdls->ip)) {
+			IPAERR_RL("failed to mdfy flt rule %i\n", i);
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			((struct ipa_flt_rule_mdfy_i *)
+			hdls->rules)[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip:	[in] the family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of routing tables
+ * @user_only:	[in] indicate rules deleted by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	struct ipa3_flt_entry *next;
+	int i;
+	int id;
+	int rule_id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			if (ipa3_id_find(entry->id) == NULL) {
+				WARN_ON_RATELIMIT_IPA(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+
+			if (!user_only ||
+					entry->ipacm_installed) {
+				list_del(&entry->link);
+				entry->tbl->rule_cnt--;
+				if (entry->rt_tbl &&
+					(!ipa3_check_idr_if_freed(
+						entry->rt_tbl)))
+					entry->rt_tbl->ref_cnt--;
+				/* if rule id was allocated from idr, remove */
+				rule_id = entry->rule_id;
+				id = entry->id;
+				if ((rule_id < ipahal_get_rule_id_hi_bit()) &&
+					(rule_id >= ipahal_get_low_rule_id()))
+					idr_remove(entry->tbl->rule_ids,
+						rule_id);
+				entry->cookie = 0;
+				kmem_cache_free(ipa3_ctx->flt_rule_cache,
+								entry);
+
+				/* remove the handle from the database */
+				ipa3_id_remove(id);
+			}
+		}
+	}
+
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6)) {
+		IPAERR("fail to commit flt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	return 0;
+}
+
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_ep_context *ep;
+	struct ipa_flt_rule_i rule;
+
+	if (ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("invalid ipa_ep_idx=%u\n", ipa_ep_idx);
+		ipa_assert();
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
+		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
+			ipa_ep_idx);
+		return;
+	}
+
+	memset(&rule, 0, sizeof(rule));
+
+	mutex_lock(&ipa3_ctx->lock);
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
+			&ep->dflt_flt4_rule_hdl, false);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+	tbl->sticky_rear = true;
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
+			&ep->dflt_flt6_rule_hdl, false);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+	tbl->sticky_rear = true;
+	mutex_unlock(&ipa3_ctx->lock);
+}
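+/*
+ * Marking the table sticky_rear after appending the default exception
+ * rule pins that rule at the end: subsequent add-at-rear requests are
+ * inserted just before it (see __ipa_add_flt_rule()), and
+ * ipa3_add_flt_rule_after() refuses to add behind it.
+ */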
+
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+	struct ipa3_flt_tbl *tbl;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ep->dflt_flt4_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
+		ep->dflt_flt4_rule_hdl = 0;
+	}
+	if (ep->dflt_flt6_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
+		ep->dflt_flt6_rule_hdl = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+/**
+ * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
+ *  The pipe must be an AP EP (not modem-owned) and must support filtering.
+ *  Updates the filtering masking values without changing the routing ones.
+ *
+ * @pipe_idx: filter pipe index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR_RL("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR("pipe %d not filtering pipe\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	if (ipa_is_modem_pipe(pipe_idx)) {
+		IPAERR("modem pipe tuple is not configured by AP\n");
+		return -EINVAL;
+	}
+
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+	fltrt_tuple.flt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+
+	return 0;
+}
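+/*
+ * Illustrative usage (hypothetical caller): clearing every tuple
+ * member masks all fields out of the hash computation for that pipe:
+ *
+ *	struct ipahal_reg_hash_tuple tuple;
+ *
+ *	memset(&tuple, 0, sizeof(tuple));
+ *	ipa3_set_flt_tuple_mask(pipe_idx, &tuple);
+ */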
+
+/**
+ * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW
+ * @pipe_idx: IPA endpoint index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in the entry array. Set by the caller to
+ *  indicate the entry array size, then set by this function as an output
+ *  parameter to indicate the number of entries actually filled
+ *
+ * This function reads the filtering table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * If the table is empty, or is a modem table residing in system memory,
+ * zero entries will be returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int tbl_entry_idx;
+	int i;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	u8 *rule_addr;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	int rule_idx;
+	struct ipa3_flt_tbl *flt_tbl_ptr;
+
+	IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n",
+		pipe_idx, ip_type, hashable, entry, num_entry);
+
+	/*
+	 * No SRAM memory is allocated to hash tables, so reading hash
+	 * table rules is not supported.
+	 */
+	if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) {
+		IPAERR_RL("Reading hashable rules not supported\n");
+		*num_entry = 0;
+		return 0;
+	}
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes ||
+		pipe_idx >= IPA3_MAX_NUM_PIPES || ip_type >= IPA_IP_MAX ||
+		!entry || !num_entry) {
+		IPAERR_RL("Invalid pipe_idx=%u\n", pipe_idx);
+		return -EFAULT;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR_RL("pipe %d does not support filtering\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	flt_tbl_ptr = &ipa3_ctx->flt_tbl[pipe_idx][ip_type];
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_nhash_ofst);
+	}
+
+	/* calculate the index of the tbl entry */
+	tbl_entry_idx = 1; /* skip the bitmap */
+	for (i = 0; i < pipe_idx; i++)
+		if (ipa3_ctx->ep_flt_bitmap & (1 << i))
+			tbl_entry_idx++;
+
+	IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
+		hdr_base_ofst, tbl_entry_idx);
+
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_entry_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
+		pipe_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid flt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables residing in DDR, access them through virtual memory */
+	if (is_sys) {
+		sys_tbl_mem =
+			&flt_tbl_ptr->curr_mem[hashable ? IPA_RULE_HASHABLE :
+			IPA_RULE_NON_HASHABLE];
+		if (sys_tbl_mem->phys_base &&
+			sys_tbl_mem->phys_base != tbl_addr) {
+			IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
+				tbl_addr, &sys_tbl_mem->phys_base);
+		}
+		if (sys_tbl_mem->phys_base)
+			rule_addr = sys_tbl_mem->base;
+		else
+			rule_addr = NULL;
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG("First rule addr 0x%pK\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing flt rule\n");
+			goto bail;
+		}
+
+		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
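+/*
+ * Illustrative debug usage (hypothetical buffer size): dump the
+ * non-hashable IPv4 filter table of a pipe:
+ *
+ *	struct ipahal_flt_rule_entry entries[64];
+ *	int num = ARRAY_SIZE(entries);
+ *
+ *	if (!ipa3_flt_read_tbl_from_hw(pipe_idx, IPA_IP_v4, false,
+ *			entries, &num))
+ *		IPADBG("read %d flt rules\n", num);
+ */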

+ 1377 - 0
ipa/ipa_v3/ipa_hdr.c

@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64 };
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
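+/*
+ * Headers and processing contexts are carved from the fixed-size bins
+ * above: e.g. a 20-byte header lands in the 24-byte bin (IPA_HDR_BIN2),
+ * and a freed slot from that bin can be reused by any later header of
+ * up to 24 bytes (see the free-offset lists in __ipa_add_hdr()).
+ */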
+
+#define HDR_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
+
+/**
+ * ipa3_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa3_hdr_entry *entry;
+	gfp_t flag = GFP_KERNEL;
+
+	mem->size = ipa3_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
+
+alloc:
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, flag);
+	if (!mem->base) {
+		if (flag == GFP_KERNEL) {
+			flag = GFP_ATOMIC;
+			goto alloc;
+		}
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (entry->is_hdr_proc_ctx)
+			continue;
+		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
+				entry->hdr, entry->hdr_len);
+	}
+
+	return 0;
+}
+
+static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+	u64 hdr_base_addr)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	int ret;
+	int ep;
+	struct ipa_ep_cfg *cfg_ptr;
+	struct ipa_l2tp_header_remove_procparams *l2p_hdr_rm_ptr;
+
+	list_for_each_entry(entry,
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+			link) {
+		IPADBG_LOW("processing type %d ofst=%d\n",
+			entry->type, entry->offset_entry->offset);
+
+		if (entry->l2tp_params.is_dst_pipe_valid) {
+			ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
+
+			if (ep >= 0) {
+				cfg_ptr = &ipa3_ctx->ep[ep].cfg;
+				l2p_hdr_rm_ptr =
+					&entry->l2tp_params.hdr_remove_param;
+				l2p_hdr_rm_ptr->hdr_ofst_pkt_size_valid =
+					cfg_ptr->hdr.hdr_ofst_pkt_size_valid;
+				l2p_hdr_rm_ptr->hdr_ofst_pkt_size =
+					cfg_ptr->hdr.hdr_ofst_pkt_size;
+				l2p_hdr_rm_ptr->hdr_endianness =
+					cfg_ptr->hdr_ext.hdr_little_endian ?
+					0 : 1;
+			}
+		}
+
+		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
+				entry->offset_entry->offset,
+				entry->hdr->hdr_len,
+				entry->hdr->is_hdr_proc_ctx,
+				entry->hdr->phys_base,
+				hdr_base_addr,
+				entry->hdr->offset_entry,
+				&entry->l2tp_params,
+				&entry->generic_params,
+				ipa3_ctx->use_64_bit_dma_mask);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr:	system address of the header table in DDR
+ * @mem:		[out] buffer to put the processing context table
+ * @aligned_mem:	[out] actual processing context table (with alignment).
+ *			Processing context table needs to be 8-byte aligned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
+	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+	u64 hdr_base_addr;
+	gfp_t flag = GFP_KERNEL;
+
+	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+	/* make sure table is aligned */
+	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+
+alloc:
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, flag);
+	if (!mem->base) {
+		if (flag == GFP_KERNEL) {
+			flag = GFP_ATOMIC;
+			goto alloc;
+		}
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	aligned_mem->phys_base =
+		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+	aligned_mem->base = mem->base +
+		(aligned_mem->phys_base - mem->phys_base);
+	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+	memset(aligned_mem->base, 0, aligned_mem->size);
+	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+		hdr_sys_addr;
+	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+}
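+/*
+ * Worked example (assuming IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE == 8):
+ * a raw allocation at phys 0x1004 yields aligned_mem->phys_base ==
+ * 0x1008; aligned_mem->base is shifted by the same 4 bytes and
+ * aligned_mem->size shrinks by the 8 padding bytes reserved above.
+ */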
+
+/**
+ * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa_commit_hdr_v3_0(void)
+{
+	struct ipa3_desc desc[3];
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer ctx_mem;
+	struct ipa_mem_buffer aligned_ctx_mem;
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
+	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	int rc = -EFAULT;
+	int i;
+	int num_cmd = 0;
+	u32 proc_ctx_size;
+	u32 proc_ctx_ofst;
+	u32 proc_ctx_size_ddr;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	struct ipahal_reg_valmask valmask;
+
+	memset(desc, 0, 3 * sizeof(struct ipa3_desc));
+
+	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto end;
+	}
+
+	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+	    &aligned_ctx_mem)) {
+		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+		goto end;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		coal_cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			goto end;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size));
+			goto end;
+		} else {
+			dma_cmd_hdr.is_read = false; /* write operation */
+			dma_cmd_hdr.skip_pipeline_clear = false;
+			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+			dma_cmd_hdr.size = hdr_mem.size;
+			dma_cmd_hdr.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(apps_hdr_ofst);
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_hdr, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+		}
+	} else {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto end;
+		} else {
+			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_HDR_INIT_SYSTEM,
+				&hdr_init_cmd, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct hdr_init_system cmd\n");
+				goto end;
+			}
+		}
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld);
+	++num_cmd;
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		if (aligned_ctx_mem.size > proc_ctx_size) {
+			IPAERR("tbl too big needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size);
+			goto end;
+		} else {
+			dma_cmd_ctx.is_read = false; /* Write operation */
+			dma_cmd_ctx.skip_pipeline_clear = false;
+			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+			dma_cmd_ctx.size = aligned_ctx_mem.size;
+			dma_cmd_ctx.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				proc_ctx_ofst;
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_ctx, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+		}
+	} else {
+		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+			IPAERR("tbl too big, needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size_ddr);
+			goto end;
+		} else {
+			reg_write_cmd.skip_pipeline_clear = false;
+			reg_write_cmd.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write_cmd.offset =
+				ipahal_get_reg_ofst(
+				IPA_SYS_PKT_PROC_CNTXT_BASE);
+			reg_write_cmd.value = aligned_ctx_mem.phys_base;
+			reg_write_cmd.value_mask =
+				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				goto end;
+			}
+		}
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], ctx_cmd_pyld);
+	++num_cmd;
+	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+	if (ipa3_send_cmd(num_cmd, desc))
+		IPAERR("fail to send immediate command\n");
+	else
+		rc = 0;
+
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
+			hdr_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+				ipa3_ctx->hdr_mem.size,
+				ipa3_ctx->hdr_mem.base,
+				ipa3_ctx->hdr_mem.phys_base);
+			ipa3_ctx->hdr_mem = hdr_mem;
+		}
+	}
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
+			ctx_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+					ipa3_ctx->hdr_proc_ctx_mem.size,
+					ipa3_ctx->hdr_proc_ctx_mem.base,
+					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
+			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
+		}
+	}
+
+end:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+
+	if (ctx_cmd_pyld)
+		ipahal_destroy_imm_cmd(ctx_cmd_pyld);
+
+	if (hdr_cmd_pyld)
+		ipahal_destroy_imm_cmd(hdr_cmd_pyld);
+
+	return rc;
+}
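+/*
+ * The descriptors above cover, in order: the optional coalescing-frame
+ * close, the header table copy (DMA_SHARED_MEM to SRAM or
+ * HDR_INIT_SYSTEM for DDR), and the processing-context copy
+ * (DMA_SHARED_MEM or a REGISTER_WRITE of IPA_SYS_PKT_PROC_CNTXT_BASE),
+ * matching the desc[3] array size.
+ */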
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+	bool add_ref_hdr, bool user_only)
+{
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset;
+	u32 bin;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int id;
+	int needed_len;
+	int mem_size;
+
+	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
+		proc_ctx->type, proc_ctx->hdr_hdl);
+
+	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
+		return -EINVAL;
+	}
+
+	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
+	if (!hdr_entry) {
+		IPAERR_RL("hdr_hdl is invalid\n");
+		return -EINVAL;
+	}
+	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
+		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
+		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc proc_ctx object\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	entry->type = proc_ctx->type;
+	entry->hdr = hdr_entry;
+	entry->l2tp_params = proc_ctx->l2tp_params;
+	entry->generic_params = proc_ctx->generic_params;
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt++;
+	entry->cookie = IPA_PROC_HDR_COOKIE;
+	entry->ipacm_installed = user_only;
+
+	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
+	if ((needed_len < 0) ||
+		((needed_len > ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+			&&
+			(needed_len >
+			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]))) {
+		IPAERR_RL("unexpected needed len %d\n", needed_len);
+		WARN_ON_RATELIMIT_IPA(1);
+		goto bad_len;
+	}
+
+	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+		bin = IPA_HDR_PROC_CTX_BIN0;
+	else
+		bin = IPA_HDR_PROC_CTX_BIN1;
+
+	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
+		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+			IPAERR_RL("hdr proc ctx table overflow\n");
+			goto bad_len;
+		}
+
+		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc offset object\n");
+			goto bad_len;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		offset->ipacm_installed = user_only;
+		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				struct ipa3_hdr_proc_ctx_offset_entry, link);
+		offset->ipacm_installed = user_only;
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+	htbl->proc_ctx_cnt++;
+	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+			htbl->proc_ctx_cnt, offset->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR_RL("failed to alloc id\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		goto ipa_insert_failed;
+	}
+	entry->id = id;
+	proc_ctx->proc_ctx_hdl = id;
+	entry->ref_cnt++;
+
+	return 0;
+
+ipa_insert_failed:
+	list_move(&offset->link,
+		&htbl->head_free_offset_list[offset->bin]);
+	entry->offset_entry = NULL;
+	list_del(&entry->link);
+	htbl->proc_ctx_cnt--;
+
+bad_len:
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+	return -EPERM;
+}
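+
+/*
+ * Worked example (editor's note): assuming the two proc-ctx bin sizes in
+ * this snapshot are 32 bytes (BIN0) and 64 bytes (BIN1), a processing
+ * context whose ipahal_get_proc_ctx_needed_len() comes back as 20 is placed
+ * in BIN0, one of 40 bytes in BIN1, and anything above the BIN1 size is
+ * rejected by the length check above.
+ */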
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset = NULL;
+	u32 bin;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	int id;
+	int mem_size;
+
+	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+		IPAERR_RL("bad param\n");
+		goto error;
+	}
+
+	if (!HDR_TYPE_IS_VALID(hdr->type)) {
+		IPAERR_RL("invalid hdr type %d\n", hdr->type);
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry)
+		goto error;
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->type = hdr->type;
+	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+	entry->eth2_ofst = hdr->eth2_ofst;
+	entry->cookie = IPA_HDR_COOKIE;
+	entry->ipacm_installed = user;
+
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+		bin = IPA_HDR_BIN4;
+	else {
+		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+		IPA_MEM_PART(apps_hdr_size_ddr);
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		/* if the header does not fit in the local table, place it in DDR */
+		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+			entry->is_hdr_proc_ctx = true;
+			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
+				entry->hdr,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				entry->phys_base)) {
+				IPAERR("dma_map_single failure for entry\n");
+				goto fail_dma_mapping;
+			}
+		} else {
+			entry->is_hdr_proc_ctx = false;
+			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
+						   GFP_KERNEL);
+			if (!offset) {
+				IPAERR("failed to alloc hdr offset object\n");
+				goto bad_hdr_len;
+			}
+			INIT_LIST_HEAD(&offset->link);
+			/*
+			 * first-time growth of this bin: the new slot's bin
+			 * and offset are fixed for the lifetime of the table
+			 */
+			offset->offset = htbl->end;
+			offset->bin = bin;
+			htbl->end += ipa_hdr_bin_sz[bin];
+			list_add(&offset->link,
+					&htbl->head_offset_list[bin]);
+			entry->offset_entry = offset;
+			offset->ipacm_installed = user;
+		}
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		/* get the first free slot */
+		offset = list_first_entry(&htbl->head_free_offset_list[bin],
+			struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+		entry->offset_entry = offset;
+		offset->ipacm_installed = user;
+	}
+
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			&entry->phys_base);
+	else
+		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR_RL("failed to alloc id\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		goto ipa_insert_failed;
+	}
+	entry->id = id;
+	hdr->hdr_hdl = id;
+	entry->ref_cnt++;
+
+	if (entry->is_hdr_proc_ctx) {
+		struct ipa_hdr_proc_ctx_add proc_ctx;
+
+		IPADBG("adding processing context for header %s\n", hdr->name);
+		proc_ctx.type = IPA_HDR_PROC_NONE;
+		proc_ctx.hdr_hdl = id;
+		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
+			IPAERR("failed to add hdr proc ctx\n");
+			goto fail_add_proc_ctx;
+		}
+		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
+	}
+
+	return 0;
+
+fail_add_proc_ctx:
+	entry->ref_cnt--;
+	hdr->hdr_hdl = 0;
+	ipa3_id_remove(id);
+ipa_insert_failed:
+	if (entry->is_hdr_proc_ctx) {
+		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+			entry->hdr_len, DMA_TO_DEVICE);
+	} else {
+		if (offset)
+			list_move(&offset->link,
+			&htbl->head_free_offset_list[offset->bin]);
+		entry->offset_entry = NULL;
+	}
+	htbl->hdr_cnt--;
+	list_del(&entry->link);
+
+fail_dma_mapping:
+	entry->is_hdr_proc_ctx = false;
+
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+error:
+	return -EPERM;
+}
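+
+/*
+ * Placement summary (editor's sketch, not driver code): a header first tries
+ * to reuse a freed slot of its bin; with no free slot it grows the local
+ * table, and only when the table is full is it DMA-mapped into DDR and
+ * referenced through a processing context. A hypothetical helper expressing
+ * the same decision:
+ */
+#if 0	/* illustration only */
+static bool example_hdr_goes_to_ddr(struct ipa3_hdr_tbl *htbl, u32 bin,
+	int mem_size)
+{
+	/* a freed slot can be reused, so the header stays local */
+	if (!list_empty(&htbl->head_free_offset_list[bin]))
+		return false;
+	/* no room left to grow the table: fall back to DDR + proc ctx */
+	return htbl->end + ipa_hdr_bin_sz[bin] > mem_size;
+}
+#endif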
+
+static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
+	bool release_hdr, bool by_user)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	entry = ipa3_id_find(proc_ctx_hdl);
+	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	IPADBG("del proc ctx cnt=%d ofst=%d\n",
+		htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+	if (by_user && entry->user_deleted) {
+		IPAERR_RL("proc_ctx already deleted by user\n");
+		return -EINVAL;
+	}
+
+	if (by_user)
+		entry->user_deleted = true;
+
+	if (--entry->ref_cnt) {
+		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+			proc_ctx_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (release_hdr)
+		__ipa3_del_hdr(entry->hdr->id, false);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->proc_ctx_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(proc_ctx_hdl);
+
+	return 0;
+}
+
+int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_HDR_COOKIE) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
+			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+	else
+		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
+			entry->hdr_len, htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	if (by_user && entry->user_deleted) {
+		IPAERR_RL("proc_ctx already deleted by user\n");
+		return -EINVAL;
+	}
+
+	if (by_user) {
+		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+			IPADBG("Trying to delete hdr %s offset=%u\n",
+				entry->name, entry->offset_entry->offset);
+			if (!entry->offset_entry->offset) {
+				IPAERR_RL(
+				"User cannot delete default header\n");
+				return -EPERM;
+			}
+		}
+		entry->user_deleted = true;
+	}
+
+	if (--entry->ref_cnt) {
+		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (entry->is_hdr_proc_ctx) {
+		dma_unmap_single(ipa3_ctx->pdev,
+			entry->phys_base,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
+	} else {
+		/* move the offset entry to appropriate free list */
+		list_move(&entry->offset_entry->link,
+			&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	}
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(hdr_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	return ipa3_add_hdr_usr(hdrs, false);
+}
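+
+/*
+ * Usage sketch (editor's illustration; the caller below is hypothetical,
+ * field names follow the ipa_ioc_add_hdr/ipa_hdr_add structures used above):
+ * adding a single header and committing it.
+ */
+#if 0	/* illustration only */
+static int example_install_hdr(const u8 *tmpl, u8 tmpl_len, u32 *hdl)
+{
+	struct ipa_ioc_add_hdr *req;
+	int ret;
+
+	req = kzalloc(sizeof(*req) + sizeof(req->hdr[0]), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	req->commit = 1;
+	req->num_hdrs = 1;
+	strlcpy(req->hdr[0].name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+	memcpy(req->hdr[0].hdr, tmpl, tmpl_len);
+	req->hdr[0].hdr_len = tmpl_len;
+	ret = ipa3_add_hdr(req);
+	if (!ret && !req->hdr[0].status)
+		*hdl = req->hdr[0].hdr_hdl;	/* keep for later deletion */
+	kfree(req);
+	return ret;
+}
+#endif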
+
+/**
+ * ipa3_add_hdr_usr() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @hdrs:		[inout] set of headers to add
+ * @user_only:	[in] indicate installed from user
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d headers to IPA driver internal data struct\n",
+			hdrs->num_hdrs);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
+			IPAERR_RL("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr_by_user() - Remove the specified headers
+ * from SW and optionally commit them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ * @by_user:	Operation requested by user?
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
+			IPAERR_RL("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr() - Remove the specified headers from SW
+ * and optionally commit them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	return ipa3_del_hdr_by_user(hdls, false);
+}
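+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): deleting a
+ * header by the handle returned from the add path above.
+ */
+#if 0	/* illustration only */
+static int example_remove_hdr(u32 hdl)
+{
+	struct ipa_ioc_del_hdr *req;
+	int ret;
+
+	req = kzalloc(sizeof(*req) + sizeof(req->hdl[0]), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	req->commit = 1;
+	req->num_hdls = 1;
+	req->hdl[0].hdl = hdl;
+	ret = ipa3_del_hdr(req);
+	kfree(req);
+	return ret;
+}
+#endif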
+
+/**
+ * ipa3_add_hdr_proc_ctx() - add the specified header processing contexts
+ * to SW and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ * @user_only:	[in] indicate installed by user-space module
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+							bool user_only)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d header processing contextes to IPA driver\n",
+			proc_ctxs->num_proc_ctxs);
+	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
+				true, user_only)) {
+			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
+			proc_ctxs->proc_ctx[i].status = -1;
+		} else {
+			proc_ctxs->proc_ctx[i].status = 0;
+		}
+	}
+
+	if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
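+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller; the
+ * IPA_HDR_PROC_ETHII_TO_ETHII type value is assumed from the msm_ipa uapi):
+ * binding an existing header handle to a processing context.
+ */
+#if 0	/* illustration only */
+static int example_add_proc_ctx(u32 hdr_hdl, u32 *proc_ctx_hdl)
+{
+	struct ipa_ioc_add_hdr_proc_ctx *req;
+	int ret;
+
+	req = kzalloc(sizeof(*req) + sizeof(req->proc_ctx[0]), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	req->commit = 1;
+	req->num_proc_ctxs = 1;
+	req->proc_ctx[0].type = IPA_HDR_PROC_ETHII_TO_ETHII;
+	req->proc_ctx[0].hdr_hdl = hdr_hdl;
+	ret = ipa3_add_hdr_proc_ctx(req, false);
+	if (!ret && !req->proc_ctx[0].status)
+		*proc_ctx_hdl = req->proc_ctx[0].proc_ctx_hdl;
+	kfree(req);
+	return ret;
+}
+#endif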
+
+/**
+ * ipa3_del_hdr_proc_ctx_by_user() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ * @by_user:	Operation requested by user?
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
+	bool by_user)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
+			IPAERR_RL("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
+}
+
+/**
+ * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa3_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_reset_hdr() - reset the current header table in SW and commit
+ * the change to IPA HW
+ *
+ * @user_only:	[in] indicate delete rules installed by userspace
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_hdr(bool user_only)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_entry *next;
+	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
+	struct ipa3_hdr_proc_ctx_entry *ctx_next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_reset_rt(IPA_IP_v4, user_only))
+		IPAERR_RL("fail to reset v4 rt\n");
+	if (ipa3_reset_rt(IPA_IP_v6, user_only))
+		IPAERR_RL("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default header */
+		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+			IPADBG("Trying to remove hdr %s offset=%u\n",
+				entry->name, entry->offset_entry->offset);
+			if (!entry->offset_entry->offset) {
+				if (entry->is_hdr_proc_ctx) {
+					IPAERR("default header is proc ctx\n");
+					mutex_unlock(&ipa3_ctx->lock);
+					WARN_ON_RATELIMIT_IPA(1);
+					return -EFAULT;
+				}
+				IPADBG("skip default header\n");
+				continue;
+			}
+		}
+
+		if (ipa3_id_find(entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON_RATELIMIT_IPA(1);
+			return -EFAULT;
+		}
+
+		if (!user_only || entry->ipacm_installed) {
+			if (entry->is_hdr_proc_ctx) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					entry->phys_base,
+					entry->hdr_len,
+					DMA_TO_DEVICE);
+				entry->proc_ctx = NULL;
+			} else {
+				/* move the offset entry to free list */
+				entry->offset_entry->ipacm_installed = false;
+				list_move(&entry->offset_entry->link,
+				&htbl->head_free_offset_list[
+					entry->offset_entry->bin]);
+			}
+			list_del(&entry->link);
+			htbl->hdr_cnt--;
+			entry->ref_cnt = 0;
+			entry->cookie = 0;
+
+			/* remove the handle from the database */
+			ipa3_id_remove(entry->id);
+			kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+		}
+	}
+
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+			list_for_each_entry_safe(off_entry, off_next,
+					 &ipa3_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+				/**
+				 * do not remove the default exception
+				 * header which is at offset 0
+				 */
+				if (off_entry->offset == 0)
+					continue;
+				list_del(&off_entry->link);
+				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
+					off_entry);
+			}
+			list_for_each_entry_safe(off_entry, off_next,
+				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+				list_del(&off_entry->link);
+				kmem_cache_free(ipa3_ctx->hdr_offset_cache,
+					off_entry);
+			}
+		}
+		/* there is one header of size 8 */
+		ipa3_ctx->hdr_tbl.end = 8;
+		ipa3_ctx->hdr_tbl.hdr_cnt = 1;
+	}
+
+	IPADBG("reset hdr proc ctx\n");
+	list_for_each_entry_safe(
+		ctx_entry,
+		ctx_next,
+		&(htbl_proc->head_proc_ctx_entry_list),
+		link) {
+
+		if (ipa3_id_find(ctx_entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON_RATELIMIT_IPA(1);
+			return -EFAULT;
+		}
+
+		if (!user_only ||
+				ctx_entry->ipacm_installed) {
+			/* move the offset entry to appropriate free list */
+			list_move(&ctx_entry->offset_entry->link,
+				&htbl_proc->head_free_offset_list[
+					ctx_entry->offset_entry->bin]);
+			list_del(&ctx_entry->link);
+			htbl_proc->proc_ctx_cnt--;
+			ctx_entry->ref_cnt = 0;
+			ctx_entry->cookie = 0;
+
+			/* remove the handle from the database */
+			ipa3_id_remove(ctx_entry->id);
+			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache,
+				ctx_entry);
+		}
+	}
+	/* only clean up offset_list and free_offset_list on global reset */
+	if (!user_only) {
+		for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&(htbl_proc->head_offset_list[i]), link) {
+				list_del(&ctx_off_entry->link);
+				kmem_cache_free(
+					ipa3_ctx->hdr_proc_ctx_offset_cache,
+					ctx_off_entry);
+			}
+			list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&(htbl_proc->head_free_offset_list[i]), link) {
+				list_del(&ctx_off_entry->link);
+				kmem_cache_free(
+					ipa3_ctx->hdr_proc_ctx_offset_cache,
+					ctx_off_entry);
+			}
+		}
+		htbl_proc->end = 0;
+		htbl_proc->proc_ctx_cnt = 0;
+	}
+
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+	return 0;
+}
+
+static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa3_hdr_entry *entry;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR_RL("Header name too long: %s\n", name);
+		return NULL;
+	}
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return its handle if it exists
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa3_put_hdr later if this function succeeds
+ */
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		lookup->hdl = entry->id;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * __ipa3_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl:	[in] handle of header to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr(u32 hdr_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr(hdr_hdl, false)) {
+		IPADBG("fail to del hdr %x\n", hdr_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
+ *  and cause deletion if reference count permits
+ * @proc_ctx_hdl:	[in] handle of processing context to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
+		IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * ipa3_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_hdr(u32 hdr_hdl)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_HDR_COOKIE) {
+		IPAERR_RL("invalid header entry\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
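+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): looking up a
+ * header by name and releasing the handle afterwards, per the kernel-doc
+ * contract of ipa3_get_hdr() above.
+ */
+#if 0	/* illustration only */
+static int example_lookup_hdr(const char *name, u32 *hdl)
+{
+	struct ipa_ioc_get_hdr lookup = { };
+	int ret;
+
+	strlcpy(lookup.name, name, IPA_RESOURCE_NAME_MAX);
+	ret = ipa3_get_hdr(&lookup);
+	if (ret)
+		return ret;
+	*hdl = lookup.hdl;
+	/* ... use the handle ... */
+	return ipa3_put_hdr(lookup.hdl);
+}
+#endif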
+
+/**
+ * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists; this is typically called for partial headers
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->type = entry->type;
+		copy->is_partial = entry->is_partial;
+		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+		copy->eth2_ofst = entry->eth2_ofst;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}

ipa/ipa_v3/ipa_hw_stats.c (+2288, -0)

@@ -0,0 +1,2288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_hw_stats.h"
+
+#define IPA_CLIENT_BIT_32(client) \
+	((ipa3_get_ep_mapping(client) >= 0 && \
+		ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \
+		(1 << ipa3_get_ep_mapping(client)) : 0)
+
+int ipa_hw_stats_init(void)
+{
+	int ret = 0, ep_index;
+	struct ipa_teth_stats_endpoints *teth_stats_init;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+		return 0;
+
+	/* initialize stats here */
+	ipa3_ctx->hw_stats.enabled = true;
+
+	teth_stats_init = kzalloc(sizeof(*teth_stats_init), GFP_KERNEL);
+	if (!teth_stats_init) {
+		IPAERR("mem allocated failed!\n");
+		return -ENOMEM;
+	}
+	/* enable prod mask */
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
+		teth_stats_init->prod_mask = (
+			IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
+		else
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
+
+		teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG_PROD);
+
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_MHI_PRIME_TETH_PROD)) {
+			ep_index = ipa3_get_ep_mapping(
+				IPA_CLIENT_MHI_PRIME_TETH_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
+
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG1_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG2_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG3_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG4_CONS);
+		}
+	} else {
+		teth_stats_init->prod_mask = (
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD));
+
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD);
+		else
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
+
+		teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG_PROD);
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->prod_mask |=
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_DL_NLO_DATA_PROD);
+
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_PROD)) {
+			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+			IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
+
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG1_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG2_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG3_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG4_CONS);
+		}
+
+		if (IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_DL_NLO_DATA_PROD) &&
+			(ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)) {
+			ep_index = ipa3_get_ep_mapping(
+					IPA_CLIENT_Q6_DL_NLO_DATA_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				kfree(teth_stats_init);
+				return -EINVAL;
+			}
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_USB_CONS);
+
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5)
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_CONS);
+			else
+				teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_CONS);
+
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG1_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG2_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG3_CONS);
+			teth_stats_init->dst_ep_mask[ep_index] |=
+				IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG4_CONS);
+		}
+	}
+
+	if (IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD)) {
+		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+		if (ep_index == -1) {
+			IPAERR("Invalid client.\n");
+			kfree(teth_stats_init);
+			return -EINVAL;
+		}
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(
+					IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	}
+
+	if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD)) {
+		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		if (ep_index == -1) {
+			IPAERR("Invalid client.\n");
+			kfree(teth_stats_init);
+			return -EINVAL;
+		}
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	}
+
+	if (IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN2_PROD)) {
+		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		if (ep_index == -1) {
+			IPAERR("Invalid client.\n");
+			kfree(teth_stats_init);
+			return -EINVAL;
+		}
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+				(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+				IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	}
+
+	if (IPA_CLIENT_BIT_32(IPA_CLIENT_WIGIG_PROD)) {
+		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD);
+		if (ep_index == -1) {
+			IPAERR("Invalid client.\n");
+			kfree(teth_stats_init);
+			return -EINVAL;
+		}
+		/* enable additional pipe monitoring for pcie modem */
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			teth_stats_init->dst_ep_mask[ep_index] =
+			(IPA_CLIENT_BIT_32(
+				IPA_CLIENT_Q6_WAN_CONS) |
+				IPA_CLIENT_BIT_32(
+					IPA_CLIENT_MHI_PRIME_TETH_CONS));
+		else if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			teth_stats_init->dst_ep_mask[ep_index] =
+			(IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS) |
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_UL_NLO_DATA_CONS));
+		else
+			teth_stats_init->dst_ep_mask[ep_index] =
+			IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	}
+
+	ret = ipa_init_teth_stats(teth_stats_init);
+	if (ret != 0)
+		IPAERR("init teth stats fails\n");
+	kfree(teth_stats_init);
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ret = ipa_init_flt_rt_stats();
+		if (ret != 0)
+			IPAERR("init flt rt stats fails\n");
+	}
+	return ret;
+}
+
+static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld **coal_cmd_pyld)
+{
+	int i;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	reg_write_coal_close.skip_pipeline_clear = false;
+	reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_coal_close.offset = ipahal_get_reg_ofst(
+		IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(i, &valmask);
+	reg_write_coal_close.value = valmask.val;
+	reg_write_coal_close.value_mask = valmask.mask;
+	*coal_cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_coal_close, false);
+}
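+
+/*
+ * Editor's note: the callers below issue this immediate command first
+ * whenever the APPS_WAN_COAL pipe exists, forcing the open coalescing
+ * aggregation frame closed (via IPA_AGGR_FORCE_CLOSE) before the HPS/
+ * pipeline clear so the subsequent stats commands see a quiesced pipe.
+ * The helper leaves *coal_cmd_pyld NULL on construction failure, which
+ * every caller checks.
+ */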
+
+int ipa_init_quota_stats(u32 pipe_bitmask)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write quota_base = {0};
+	struct ipahal_imm_cmd_pyld *quota_base_pyld;
+	struct ipahal_imm_cmd_register_write quota_mask = {0};
+	struct ipahal_imm_cmd_pyld *quota_mask_pyld;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota));
+	ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask;
+	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
+		&ipa3_ctx->hw_stats.quota.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_quota_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_quota_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
+	/* setting the registers and init the stats pyld are done atomically */
+	quota_mask.skip_pipeline_clear = false;
+	quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n,
+		ipa3_ctx->ee);
+	quota_mask.value = pipe_bitmask;
+	quota_mask.value_mask = ~0;
+	quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_mask, false);
+	if (!quota_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_coal_cmd;
+	}
+	desc[num_cmd].opcode = quota_mask_pyld->opcode;
+	desc[num_cmd].pyld = quota_mask_pyld->data;
+	desc[num_cmd].len = quota_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	quota_base.skip_pipeline_clear = false;
+	quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
+		ipa3_ctx->ee);
+	quota_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_quota_ofst);
+	quota_base.value_mask = ~0;
+	quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_base, false);
+	if (!quota_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_quota_mask;
+	}
+	desc[num_cmd].opcode = quota_base_pyld->opcode;
+	desc[num_cmd].pyld = quota_base_pyld->data;
+	desc[num_cmd].len = quota_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_quota_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_quota_base;
+	}
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_quota_base:
+	ipahal_destroy_imm_cmd(quota_base_pyld);
+destroy_quota_mask:
+	ipahal_destroy_imm_cmd(quota_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
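+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): enabling quota
+ * stats on two producer pipes using the IPA_CLIENT_BIT_32() helper above.
+ */
+#if 0	/* illustration only */
+static int example_enable_quota_stats(void)
+{
+	u32 mask = IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD) |
+		IPA_CLIENT_BIT_32(IPA_CLIENT_WLAN1_PROD);
+
+	return ipa_init_quota_stats(mask);
+}
+#endif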
+
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
+{
+	int i;
+	int ret;
+	struct ipahal_stats_get_offset_quota get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc[2];
+	struct ipahal_stats_quota_all *stats;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	get_offset.init = ipa3_ctx->hw_stats.quota.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0)
+		return 0;
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory");
+		return ret;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_quota_ofst) + offset.offset;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA,
+		&ipa3_ctx->hw_stats.quota.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].client != i)
+			continue;
+
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes +=
+			stats->stats[ep_idx].num_ipv4_bytes;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts +=
+			stats->stats[ep_idx].num_ipv4_pkts;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes +=
+			stats->stats[ep_idx].num_ipv6_bytes;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts +=
+			stats->stats[ep_idx].num_ipv6_pkts;
+	}
+
+	/* copy results to out parameter */
+	if (out)
+		*out = ipa3_ctx->hw_stats.quota.stats;
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+}
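+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): reading the
+ * accumulated quota stats. Note the clear_after_read semantics above: the
+ * read zeroes the hardware counters while the driver cache keeps running
+ * totals.
+ */
+#if 0	/* illustration only */
+static void example_log_usb_quota(void)
+{
+	struct ipa_quota_stats_all out;
+
+	if (ipa_get_quota_stats(&out))
+		return;
+	pr_info("USB prod ipv4 bytes: %llu\n",
+		out.client[IPA_CLIENT_USB_PROD].num_ipv4_bytes);
+}
+#endif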
+
+int ipa_reset_quota_stats(enum ipa_client_type client)
+{
+	int ret;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (client >= IPA_CLIENT_MAX) {
+		IPAERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_quota_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_quota_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.quota.stats.client[client];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_quota_stats(void)
+{
+	int ret;
+	struct ipa_quota_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_quota_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_quota_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.quota.stats;
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write teth_base = {0};
+	struct ipahal_imm_cmd_pyld *teth_base_pyld;
+	struct ipahal_imm_cmd_register_write teth_mask = { 0 };
+	struct ipahal_imm_cmd_pyld *teth_mask_pyld;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+	int i;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!in || !in->prod_mask) {
+		IPAERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) {
+		if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) {
+			IPAERR("prod %d doesn't have cons\n", i);
+			return -EINVAL;
+		}
+	}
+	IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask);
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.teth.init, 0,
+		sizeof(ipa3_ctx->hw_stats.teth.init));
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		memset(&ipa3_ctx->hw_stats.teth.prod_stats_sum[i], 0,
+			sizeof(ipa3_ctx->hw_stats.teth.prod_stats_sum[i]));
+		memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
+			sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
+	}
+	ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask;
+	memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask,
+		sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask));
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING,
+		&ipa3_ctx->hw_stats.teth.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_tethering_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_tethering_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
+	/* setting the registers and init the stats pyld are done atomically */
+	teth_mask.skip_pipeline_clear = false;
+	teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n,
+		ipa3_ctx->ee);
+	teth_mask.value = in->prod_mask;
+	teth_mask.value_mask = ~0;
+	teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&teth_mask, false);
+	if (!teth_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_coal_cmd;
+	}
+	desc[num_cmd].opcode = teth_mask_pyld->opcode;
+	desc[num_cmd].pyld = teth_mask_pyld->data;
+	desc[num_cmd].len = teth_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	teth_base.skip_pipeline_clear = false;
+	teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n,
+		ipa3_ctx->ee);
+	teth_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_tethering_ofst);
+	teth_base.value_mask = ~0;
+	teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&teth_base, false);
+	if (!teth_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_teth_mask;
+	}
+	desc[num_cmd].opcode = teth_base_pyld->opcode;
+	desc[num_cmd].pyld = teth_base_pyld->data;
+	desc[num_cmd].len = teth_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_tethering_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_teth_base;
+	}
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_teth_base:
+	ipahal_destroy_imm_cmd(teth_base_pyld);
+destroy_teth_mask:
+	ipahal_destroy_imm_cmd(teth_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
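+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): monitoring
+ * traffic from the USB producer towards the Q6 WAN consumer, mirroring the
+ * masks ipa_hw_stats_init() builds above.
+ */
+#if 0	/* illustration only */
+static int example_enable_teth_stats(void)
+{
+	struct ipa_teth_stats_endpoints in = { };
+	int ep;
+
+	ep = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+	if (ep < 0)
+		return -EINVAL;
+	in.prod_mask = IPA_CLIENT_BIT_32(IPA_CLIENT_USB_PROD);
+	in.dst_ep_mask[ep] = IPA_CLIENT_BIT_32(IPA_CLIENT_Q6_WAN_CONS);
+	return ipa_init_teth_stats(&in);
+}
+#endif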
+
+int ipa_get_teth_stats(void)
+{
+	int i, j;
+	int ret;
+	struct ipahal_stats_get_offset_tethering get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = {0};
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc[2];
+	struct ipahal_stats_tethering_all *stats_all;
+	struct ipa_hw_stats_teth *sw_stats = &ipa3_ctx->hw_stats.teth;
+	struct ipahal_stats_tethering *stats;
+	struct ipa_quota_stats *quota_stats;
+	struct ipahal_stats_init_tethering *init =
+		(struct ipahal_stats_init_tethering *)
+			&ipa3_ctx->hw_stats.teth.init;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	get_offset.init = ipa3_ctx->hw_stats.teth.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0)
+		return 0;
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		return -ENOMEM;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_tethering_ofst) + offset.offset;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats_all = kzalloc(sizeof(*stats_all), GFP_KERNEL);
+	if (!stats_all) {
+		IPADBG("failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING,
+		&ipa3_ctx->hw_stats.teth.init, mem.base, stats_all);
+	if (ret) {
+		IPAERR("failed to parse stats_all (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/* reset prod_stats cache */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
+			sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		for (j = 0; j < IPA_CLIENT_MAX; j++) {
+			int prod_idx = ipa3_get_ep_mapping(i);
+			int cons_idx = ipa3_get_ep_mapping(j);
+
+			if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES)
+				continue;
+
+			if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES)
+				continue;
+
+			/* save hw-query result */
+			if ((init->prod_bitmask & (1 << prod_idx)) &&
+				(init->cons_bitmask[prod_idx]
+					& (1 << cons_idx))) {
+				IPADBG_LOW("prod %d cons %d\n",
+					prod_idx, cons_idx);
+				stats = &stats_all->stats[prod_idx][cons_idx];
+				IPADBG_LOW("num_ipv4_bytes %lld\n",
+					stats->num_ipv4_bytes);
+				IPADBG_LOW("num_ipv4_pkts %lld\n",
+					stats->num_ipv4_pkts);
+				IPADBG_LOW("num_ipv6_pkts %lld\n",
+					stats->num_ipv6_pkts);
+				IPADBG_LOW("num_ipv6_bytes %lld\n",
+					stats->num_ipv6_bytes);
+
+				/* update stats*/
+				quota_stats =
+					&sw_stats->prod_stats[i].client[j];
+				quota_stats->num_ipv4_bytes =
+					stats->num_ipv4_bytes;
+				quota_stats->num_ipv4_pkts =
+					stats->num_ipv4_pkts;
+				quota_stats->num_ipv6_bytes =
+					stats->num_ipv6_bytes;
+				quota_stats->num_ipv6_pkts =
+					stats->num_ipv6_pkts;
+
+				/* Accumulated stats */
+				quota_stats =
+					&sw_stats->prod_stats_sum[i].client[j];
+				quota_stats->num_ipv4_bytes +=
+					stats->num_ipv4_bytes;
+				quota_stats->num_ipv4_pkts +=
+					stats->num_ipv4_pkts;
+				quota_stats->num_ipv6_bytes +=
+					stats->num_ipv6_bytes;
+				quota_stats->num_ipv6_pkts +=
+					stats->num_ipv6_pkts;
+			}
+		}
+	}
+
+	ret = 0;
+free_stats:
+	kfree(stats_all);
+	stats = NULL;
+destroy_imm:
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+}
+
+int ipa_query_teth_stats(enum ipa_client_type prod,
+	struct ipa_quota_stats_all *out, bool reset)
+{
+	if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) {
+		IPAERR("invalid prod %d\n", prod);
+		return -EINVAL;
+	}
+
+	/* copy results to out parameter */
+	if (reset)
+		*out = ipa3_ctx->hw_stats.teth.prod_stats[prod];
+	else
+		*out = ipa3_ctx->hw_stats.teth.prod_stats_sum[prod];
+	return 0;
+}
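+
+/*
+ * Usage sketch (editor's illustration, hypothetical caller): refreshing the
+ * cache from hardware and then reading the accumulated per-consumer totals
+ * for one producer.
+ */
+#if 0	/* illustration only */
+static int example_read_usb_teth_stats(struct ipa_quota_stats_all *out)
+{
+	int ret;
+
+	ret = ipa_get_teth_stats();	/* pulls HW counters into the cache */
+	if (ret)
+		return ret;
+	/* reset=false returns the accumulated (prod_stats_sum) view */
+	return ipa_query_teth_stats(IPA_CLIENT_USB_PROD, out, false);
+}
+#endif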
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons)
+{
+	int ret;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!IPA_CLIENT_IS_PROD(prod) || !IPA_CLIENT_IS_CONS(cons)) {
+		IPAERR("invalid prod %d or cons %d\n", prod, cons);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_teth_stats();
+	if (ret) {
+		IPAERR("ipa_get_teth_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[cons];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod)
+{
+	int ret;
+	int i;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!IPA_CLIENT_IS_PROD(prod)) {
+		IPAERR("invalid prod %d\n", prod);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_teth_stats();
+	if (ret) {
+		IPAERR("ipa_get_teth_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[prod].client[i];
+		memset(stats, 0, sizeof(*stats));
+	}
+
+	return 0;
+}
+
+int ipa_reset_all_teth_stats(void)
+{
+	int i;
+	int ret;
+	struct ipa_quota_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) {
+			ret = ipa_get_teth_stats();
+			if (ret) {
+				IPAERR("ipa_get_teth_stats failed %d\n", ret);
+				return ret;
+			}
+			/* a single iteration will reset all hardware stats */
+			break;
+		}
+	}
+
+	/* reset driver's cache */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		stats = &ipa3_ctx->hw_stats.teth.prod_stats_sum[i];
+		memset(stats, 0, sizeof(*stats));
+	}
+
+	return 0;
+}
+
+int ipa_init_flt_rt_stats(void)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	int smem_ofst, smem_size;
+	int stats_base_flt_v4, stats_base_flt_v6;
+	int stats_base_rt_v4, stats_base_rt_v6;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write flt_v4_base = {0};
+	struct ipahal_imm_cmd_pyld *flt_v4_base_pyld;
+	struct ipahal_imm_cmd_register_write flt_v6_base = {0};
+	struct ipahal_imm_cmd_pyld *flt_v6_base_pyld;
+	struct ipahal_imm_cmd_register_write rt_v4_base = {0};
+	struct ipahal_imm_cmd_pyld *rt_v4_base_pyld;
+	struct ipahal_imm_cmd_register_write rt_v6_base = {0};
+	struct ipahal_imm_cmd_pyld *rt_v6_base_pyld;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[6] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
+	smem_size = IPA_MEM_PART(stats_fnr_size);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR,
+		(void *)(uintptr_t)(IPA_MAX_FLT_RT_CNT_INDEX), false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > smem_size) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			smem_size, pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
+	stats_base_flt_v4 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
+	stats_base_flt_v6 = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
+	stats_base_rt_v4 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
+	stats_base_rt_v6 = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);
+
+	/* setting the registers and init the stats pyld are done atomically */
+	/* set IPA_STAT_FILTER_IPV4_BASE */
+	flt_v4_base.skip_pipeline_clear = false;
+	flt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_v4_base.offset = stats_base_flt_v4;
+	flt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	flt_v4_base.value_mask = ~0;
+	flt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&flt_v4_base, false);
+	if (!flt_v4_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_coal_cmd;
+	}
+	desc[num_cmd].opcode = flt_v4_base_pyld->opcode;
+	desc[num_cmd].pyld = flt_v4_base_pyld->data;
+	desc[num_cmd].len = flt_v4_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	/* set IPA_STAT_FILTER_IPV6_BASE */
+	flt_v6_base.skip_pipeline_clear = false;
+	flt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_v6_base.offset = stats_base_flt_v6;
+	flt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	flt_v6_base.value_mask = ~0;
+	flt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&flt_v6_base, false);
+	if (!flt_v6_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_flt_v4_base;
+	}
+	desc[num_cmd].opcode = flt_v6_base_pyld->opcode;
+	desc[num_cmd].pyld = flt_v6_base_pyld->data;
+	desc[num_cmd].len = flt_v6_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	/* set IPA_STAT_ROUTER_IPV4_BASE */
+	rt_v4_base.skip_pipeline_clear = false;
+	rt_v4_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	rt_v4_base.offset = stats_base_rt_v4;
+	rt_v4_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	rt_v4_base.value_mask = ~0;
+	rt_v4_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&rt_v4_base, false);
+	if (!rt_v4_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_flt_v6_base;
+	}
+	desc[num_cmd].opcode = rt_v4_base_pyld->opcode;
+	desc[num_cmd].pyld = rt_v4_base_pyld->data;
+	desc[num_cmd].len = rt_v4_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	/* set IPA_STAT_ROUTER_IPV6_BASE */
+	rt_v6_base.skip_pipeline_clear = false;
+	rt_v6_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	rt_v6_base.offset = stats_base_rt_v6;
+	rt_v6_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	rt_v6_base.value_mask = ~0;
+	rt_v6_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&rt_v6_base, false);
+	if (!rt_v6_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_rt_v4_base;
+	}
+	desc[num_cmd].opcode = rt_v6_base_pyld->opcode;
+	desc[num_cmd].pyld = rt_v6_base_pyld->data;
+	desc[num_cmd].len = rt_v6_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			smem_ofst;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_rt_v6_base;
+	}
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_rt_v6_base:
+	ipahal_destroy_imm_cmd(rt_v6_base_pyld);
+destroy_rt_v4_base:
+	ipahal_destroy_imm_cmd(rt_v4_base_pyld);
+destroy_flt_v6_base:
+	ipahal_destroy_imm_cmd(flt_v6_base_pyld);
+destroy_flt_v4_base:
+	ipahal_destroy_imm_cmd(flt_v4_base_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
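+/*
+ * __ipa_get_flt_rt_stats() - read FnR counters [start_id..end_id] from the
+ * stats SRAM partition into a DMA bounce buffer and parse them into the
+ * caller's query. When query->reset is set, the hardware counters are
+ * cleared as part of the read (clear_after_read).
+ */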
+static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
+{
+	int ret;
+	int smem_ofst;
+	bool clear = query->reset;
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc[2];
+	int num_cmd = 0;
+	int i;
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
+	if (!get_offset) {
+		IPADBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
+
+	get_offset->start_id = query->start_id;
+	get_offset->end_id = query->end_id;
+
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		goto free_offset;
+	}
+
+	IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0) {
+		ret = 0;
+		goto free_offset;
+	}
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		ret = -ENOMEM;
+		goto free_offset;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = clear;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst + offset.offset;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR,
+		NULL, mem.base, query);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto destroy_imm;
+	}
+	ret = 0;
+
+destroy_imm:
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+free_offset:
+	kfree(get_offset);
+	return ret;
+}
+
+int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
+{
+	if (!ipa3_ctx->hw_stats.enabled) {
+		IPAERR("hw_stats is not enabled\n");
+		return 0;
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("FnR stats not supported in %d hw_type\n",
+			ipa3_ctx->ipa_hw_type);
+		return 0;
+	}
+
+	if (query->start_id == 0 || query->end_id == 0) {
+		IPAERR("Invalid start_id/end_id, must be not 0\n");
+		IPAERR("start_id %d, end_id %d\n",
+			query->start_id, query->end_id);
+		return -EINVAL;
+	}
+
+	if (query->start_id > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("start_cnt_id %d out of range\n", query->start_id);
+		return -EINVAL;
+	}
+
+	if (query->end_id > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("end_cnt_id %d out of range\n", query->end_id);
+		return -EINVAL;
+	}
+
+	if (query->end_id < query->start_id) {
+		IPAERR("end_id %d < start_id %d\n",
+			query->end_id, query->start_id);
+		return -EINVAL;
+	}
+
+	if (query->stats_size > sizeof(struct ipa_flt_rt_stats)) {
+		IPAERR("stats_size %d > ipa_flt_rt_stats %d\n",
+			query->stats_size, sizeof(struct ipa_flt_rt_stats));
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(query);
+}
+
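+/*
+ * __ipa_set_flt_rt_stats() - overwrite a single FnR counter by formatting
+ * the new values into a bounce buffer and DMA-writing it to the counter's
+ * offset within the stats SRAM partition.
+ */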
+static int __ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
+{
+	int ret;
+	int smem_ofst;
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *get_offset;
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+
+	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
+	if (!get_offset) {
+		IPADBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	smem_ofst = IPA_MEM_PART(stats_fnr_ofst);
+
+	get_offset->start_id = index;
+	get_offset->end_id = index;
+
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		goto free_offset;
+	}
+
+	IPADBG("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0) {
+		ret = 0;
+		goto free_offset;
+	}
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		ret = -ENOMEM;
+		goto free_offset;
+	}
+	ipahal_set_flt_rt_sw_stats(mem.base, stats);
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+free_offset:
+	kfree(get_offset);
+	return ret;
+}
+
+int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats)
+{
+	if (!ipa3_ctx->hw_stats.enabled) {
+		IPAERR("hw_stats is not enabled\n");
+		return 0;
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("FnR stats not supported in %d hw_type\n",
+			ipa3_ctx->ipa_hw_type);
+		return 0;
+	}
+
+	if (index > IPA_MAX_FLT_RT_CNT_INDEX) {
+		IPAERR("index %d out of range\n", index);
+		return -EINVAL;
+	}
+
+	if (index <= IPA_FLT_RT_HW_COUNTER) {
+		IPAERR("index %d invalid, only support sw counter set\n",
+			index);
+		return -EINVAL;
+	}
+
+	return __ipa_set_flt_rt_stats(index, stats);
+}
+
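+/*
+ * ipa_init_drop_stats() - initialize drop stats for the pipes in
+ * pipe_bitmask. Writes the drop-count mask and base registers and DMAs the
+ * init payload into the drop stats SRAM partition in one command batch.
+ */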
+int ipa_init_drop_stats(u32 pipe_bitmask)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write drop_base = {0};
+	struct ipahal_imm_cmd_pyld *drop_base_pyld;
+	struct ipahal_imm_cmd_register_write drop_mask = {0};
+	struct ipahal_imm_cmd_pyld *drop_mask_pyld;
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
+	struct ipa3_desc desc[4] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop));
+	ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask;
+	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_drop_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_drop_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&coal_cmd_pyld);
+		if (!coal_cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto unmap;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], coal_cmd_pyld);
+		++num_cmd;
+	}
+
+	/* setting the registers and init the stats pyld are done atomically */
+	drop_mask.skip_pipeline_clear = false;
+	drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n,
+		ipa3_ctx->ee);
+	drop_mask.value = pipe_bitmask;
+	drop_mask.value_mask = ~0;
+	drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_mask, false);
+	if (!drop_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_coal_cmd;
+	}
+	desc[num_cmd].opcode = drop_mask_pyld->opcode;
+	desc[num_cmd].pyld = drop_mask_pyld->data;
+	desc[num_cmd].len = drop_mask_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	drop_base.skip_pipeline_clear = false;
+	drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n,
+		ipa3_ctx->ee);
+	drop_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst);
+	drop_base.value_mask = ~0;
+	drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_base, false);
+	if (!drop_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_mask;
+	}
+	desc[num_cmd].opcode = drop_base_pyld->opcode;
+	desc[num_cmd].pyld = drop_base_pyld->data;
+	desc[num_cmd].len = drop_base_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_drop_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_base;
+	}
+	desc[num_cmd].opcode = cmd_pyld->opcode;
+	desc[num_cmd].pyld = cmd_pyld->data;
+	desc[num_cmd].len = cmd_pyld->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_drop_base:
+	ipahal_destroy_imm_cmd(drop_base_pyld);
+destroy_drop_mask:
+	ipahal_destroy_imm_cmd(drop_mask_pyld);
+destroy_coal_cmd:
+	if (coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(coal_cmd_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
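+/*
+ * ipa_get_drop_stats() - read drop stats with clear-after-read and
+ * accumulate them into the driver cache; copy the cache to @out when
+ * provided. Passing NULL only resets the hardware counters.
+ */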
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
+{
+	int i;
+	int ret;
+	struct ipahal_stats_get_offset_drop get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld[2];
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc[2];
+	struct ipahal_stats_drop_all *stats;
+	int num_cmd = 0;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	get_offset.init = ipa3_ctx->hw_stats.drop.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	if (offset.size == 0)
+		return 0;
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		return -ENOMEM;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+		IPA_EP_NOT_ALLOCATED) {
+		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			ret = -ENOMEM;
+			goto free_dma_mem;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst) + offset.offset;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	ret = ipa3_send_cmd(num_cmd, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].client != i)
+			continue;
+
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt +=
+			stats->stats[ep_idx].drop_byte_cnt;
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt +=
+			stats->stats[ep_idx].drop_packet_cnt;
+	}
+
+	if (!out) {
+		ret = 0;
+		goto free_stats;
+	}
+
+	/* copy results to out parameter */
+	*out = ipa3_ctx->hw_stats.drop.stats;
+
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+}
+
+int ipa_reset_drop_stats(enum ipa_client_type client)
+{
+	int ret;
+	struct ipa_drop_stats *stats;
+
+	if (client >= IPA_CLIENT_MAX) {
+		IPAERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_drop_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_drop_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.drop.stats.client[client];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_drop_stats(void)
+{
+	int ret;
+	struct ipa_drop_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_drop_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_drop_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.drop.stats;
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
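+/*
+ * debugfs interface: one read/write node per stats type under
+ * <parent>/hw_stats. Reading prints the counters; writing a client index
+ * (-1 for all) resets them, and the flt_rt node accepts a "reset" command.
+ */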
+#ifndef CONFIG_DEBUG_FS
+int ipa_debugfs_init_stats(struct dentry *parent) { return 0; }
+#else
+#define IPA_MAX_MSG_LEN 4096
+static char dbg_buff[IPA_MAX_MSG_LEN];
+
+static ssize_t ipa_debugfs_reset_quota_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &client);
+	if (ret)
+		goto bail;
+
+	if (client == -1)
+		ipa_reset_all_quota_stats();
+	else
+		ipa_reset_quota_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_quota_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_quota_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_bytes=%llu\n",
+			out->client[i].num_ipv4_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_bytes=%llu\n",
+			out->client[i].num_ipv6_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_pkts=%u\n",
+			out->client[i].num_ipv4_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_pkts=%u\n",
+			out->client[i].num_ipv6_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &client);
+	if (ret)
+		goto bail;
+
+	if (client == -1)
+		ipa_reset_all_teth_stats();
+	else
+		ipa_reset_all_cons_teth_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i, j;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_PROD(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		res = ipa_get_teth_stats();
+		if (res) {
+			mutex_unlock(&ipa3_ctx->lock);
+			kfree(out);
+			return res;
+		}
+		/* snapshot this producer's accumulated stats for printing */
+		*out = ipa3_ctx->hw_stats.teth.prod_stats[i];
+
+		for (j = 0; j < IPA_CLIENT_MAX; j++) {
+			int cons_idx = ipa3_get_ep_mapping(j);
+
+			if (cons_idx == -1)
+				continue;
+
+			if (IPA_CLIENT_IS_TEST(j))
+				continue;
+
+			if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx]
+				& (1 << cons_idx)))
+				continue;
+
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"%s->%s:\n",
+				ipa_clients_strings[i],
+				ipa_clients_strings[j]);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_bytes=%llu\n",
+				out->client[j].num_ipv4_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_bytes=%llu\n",
+				out->client[j].num_ipv6_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_pkts=%u\n",
+				out->client[j].num_ipv4_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_pkts=%u\n",
+				out->client[j].num_ipv6_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_control_flt_rt_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct ipa_ioc_flt_rt_query *query;
+	unsigned long missing;
+	int pyld_size = 0;
+	int ret;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
+		GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)(uintptr_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (count >= sizeof(dbg_buff)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (strcmp(dbg_buff, "reset\n") == 0) {
+		query->reset = 1;
+		query->start_id = 1;
+		query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
+		ipa_get_flt_rt_stats(query);
+	} else {
+		IPAERR("unsupport flt_rt command\n");
+	}
+
+	ret = count;
+bail:
+	kfree((void *)(uintptr_t)(query->stats));
+	kfree(query);
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_flt_rt_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	int i;
+	int res;
+	int pyld_size = 0;
+	struct ipa_ioc_flt_rt_query *query;
+
+	query = kzalloc(sizeof(struct ipa_ioc_flt_rt_query),
+		GFP_KERNEL);
+	if (!query)
+		return -ENOMEM;
+	query->start_id = 1;
+	query->end_id = IPA_MAX_FLT_RT_CNT_INDEX;
+	query->reset = true;
+	query->stats_size = sizeof(struct ipa_flt_rt_stats);
+	pyld_size = IPA_MAX_FLT_RT_CNT_INDEX *
+		sizeof(struct ipa_flt_rt_stats);
+	query->stats = (uint64_t)(uintptr_t)kzalloc(pyld_size, GFP_KERNEL);
+	if (!query->stats) {
+		kfree(query);
+		return -ENOMEM;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_flt_rt_stats(query);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree((void *)(uintptr_t)(query->stats));
+		kfree(query);
+		return res;
+	}
+	for (i = 0; i < IPA_MAX_FLT_RT_CNT_INDEX; i++) {
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"cnt_id: %d\n", i + 1);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_pkts: %d\n",
+			((struct ipa_flt_rt_stats *)
+			query->stats)[i].num_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_pkts_hash: %d\n",
+			((struct ipa_flt_rt_stats *)
+			query->stats)[i].num_pkts_hash);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_bytes: %lld\n",
+			((struct ipa_flt_rt_stats *)
+			query->stats)[i].num_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree((void *)(uintptr_t)(query->stats));
+	kfree(query);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_reset_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	ret = kstrtos8_from_user(ubuf, count, 0, &client);
+	if (ret)
+		goto bail;
+
+	if (client == -1)
+		ipa_reset_all_drop_stats();
+	else
+		ipa_reset_drop_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_drop_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_drop_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_CONS(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_byte_cnt=%u\n",
+			out->client[i].drop_byte_cnt);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_packet_cnt=%u\n",
+			out->client[i].drop_packet_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static const struct file_operations ipa3_quota_ops = {
+	.read = ipa_debugfs_print_quota_stats,
+	.write = ipa_debugfs_reset_quota_stats,
+};
+
+static const struct file_operations ipa3_tethering_ops = {
+	.read = ipa_debugfs_print_tethering_stats,
+	.write = ipa_debugfs_reset_tethering_stats,
+};
+
+static const struct file_operations ipa3_flt_rt_ops = {
+	.read = ipa_debugfs_print_flt_rt_stats,
+	.write = ipa_debugfs_control_flt_rt_stats,
+};
+
+static const struct file_operations ipa3_drop_ops = {
+	.read = ipa_debugfs_print_drop_stats,
+	.write = ipa_debugfs_reset_drop_stats,
+};
+
+int ipa_debugfs_init_stats(struct dentry *parent)
+{
+	const mode_t read_write_mode = 0664;
+	struct dentry *file;
+	struct dentry *dent;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	dent = debugfs_create_dir("hw_stats", parent);
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("fail to create folder in debug_fs\n");
+		return -EFAULT;
+	}
+
+	file = debugfs_create_file("quota", read_write_mode, dent, NULL,
+		&ipa3_quota_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "quota");
+		goto fail;
+	}
+
+	file = debugfs_create_file("drop", read_write_mode, dent, NULL,
+		&ipa3_drop_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "drop");
+		goto fail;
+	}
+
+	file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
+		&ipa3_tethering_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "tethering");
+		goto fail;
+	}
+
+	file = debugfs_create_file("flt_rt", read_write_mode, dent, NULL,
+		&ipa3_flt_rt_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file flt_rt\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	debugfs_remove_recursive(dent);
+	return -EFAULT;
+}
+#endif

+ 3269 - 0
ipa/ipa_v3/ipa_i.h

@@ -0,0 +1,3269 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA3_I_H_
+#define _IPA3_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "ipahal/ipahal_reg.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+#include "ipa_pm.h"
+#include "ipa_defs.h"
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/qmp.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+#define IPA_DEV_NAME_MAX_LEN 15
+#define DRV_NAME "ipa"
+
+#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
+#define MTU_BYTE 1500
+
+#define IPA_EP_NOT_ALLOCATED (-1)
+#define IPA3_MAX_NUM_PIPES 31
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_COMMON_EVENT_RING_SIZE 0x7C00
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (3)
+#define IPA_GENERIC_RX_POOL_SZ 192
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPA_IPC_LOG_PAGES 50
+
+#define IPA_MAX_NUM_REQ_CACHE 10
+
+#define NAPI_WEIGHT 60
+
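+/*
+ * Logging helpers: print to the kernel log and, once ipa3_ctx is up,
+ * mirror the message into the IPC logging buffers.
+ */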
+#define IPADBG(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define IPADBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAERR(fmt, args...) \
+	do { \
+		pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define IPAERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa(DRV_NAME " %s:%d " fmt, __func__,\
+		__LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define IPALOG_VnP_ADDRS(ptr) \
+	do { \
+		phys_addr_t b = (phys_addr_t) virt_to_phys(ptr); \
+		IPAERR("%s: VIRT: %pK PHYS: %pa\n", \
+			   #ptr, ptr, &b); \
+	} while (0)
+
+/* round addresses to the closest page per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+	do { \
+		(iova_p) = rounddown((iova), PAGE_SIZE); \
+		(pa_p) = rounddown((pa), PAGE_SIZE); \
+		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+	} while (0)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP  19
+#define WLAN1_CONS_RX_EP  14
+#define WLAN2_CONS_RX_EP  16
+#define WLAN3_CONS_RX_EP  17
+#define WLAN4_CONS_RX_EP  18
+
+#define IPA_RAM_NAT_OFST \
+	IPA_MEM_PART(nat_tbl_ofst)
+#define IPA_RAM_NAT_SIZE \
+	IPA_MEM_PART(nat_tbl_size)
+#define IPA_RAM_IPV6CT_OFST 0
+#define IPA_RAM_IPV6CT_SIZE 0
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
+#define IS_IPV6CT_MEM_DEV(d) \
+	(((void *) (d) == (void *) &ipa3_ctx->ipv6ct_mem))
+
+#define IS_NAT_MEM_DEV(d) \
+	(((void *) (d) == (void *) &ipa3_ctx->nat_mem))
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do {				\
+	if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX)	\
+		break;							\
+	++__base[__excp];						\
+	} while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x) do { } while (0)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0)
+#endif
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+	~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
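+/* accessor for a field of the platform-specific SRAM memory partition */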
+#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition->x_)
+
+#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
+#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
+
+#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000)
+
+#define IPA_SLEEP_CLK_RATE_KHZ (32)
+
+#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+#define SMEM_IPA_FILTER_TABLE 497
+
+enum {
+	SMEM_APPS,
+	SMEM_MODEM,
+	SMEM_Q6,
+	SMEM_DSPS,
+	SMEM_WCNSS,
+	SMEM_CDSP,
+	SMEM_RPM,
+	SMEM_TZ,
+	SMEM_SPSS,
+	SMEM_HYP,
+	NUM_SMEM_SUBSYSTEMS,
+};
+
+#define IPA_WDI_RX_RING_RES			0
+#define IPA_WDI_RX_RING_RP_RES		1
+#define IPA_WDI_RX_COMP_RING_RES	2
+#define IPA_WDI_RX_COMP_RING_WP_RES	3
+#define IPA_WDI_TX_RING_RES			4
+#define IPA_WDI_CE_RING_RES			5
+#define IPA_WDI_CE_DB_RES			6
+#define IPA_WDI_TX_DB_RES			7
+#define IPA_WDI_MAX_RES				8
+
+/* use QMAP header reserved bit to identify tethered traffic */
+#define IPA_QMAP_TETH_BIT (1 << 30)
+
+#ifdef CONFIG_ARM64
+/* Outer caches unsupported on ARM64 platforms */
+# define outer_flush_range(x, y)
+# define __cpuc_flush_dcache_area __flush_dcache_area
+#endif
+
+#define IPA_APP_VOTE_MAX 500
+
+#define IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_SMP2P_OUT_CLK_VOTE_IDX 1
+#define IPA_SMP2P_SMEM_STATE_MASK 3
+
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+				   x == IPA_MODE_MOBILE_AP_WAN || \
+				   x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 1000
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(X) (X/1000)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 4096
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_MHI_GSI_EVENT_RING_ID_START 10
+#define IPA_MHI_GSI_EVENT_RING_ID_END 12
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
+#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
+#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+/*
+ * The following paths below are used when building the system for the
+ * emulation environment.
+ *
+ * As new hardware platforms are added into the emulation environment,
+ * please add the appropriate paths here for their firmwares.
+ */
+#define IPA_FWS_PATH_4_0     "ipa/4.0/ipa_fws.elf"
+#define IPA_FWS_PATH_3_5_1   "ipa/3.5.1/ipa_fws.elf"
+#define IPA_FWS_PATH_4_5     "ipa/4.5/ipa_fws.elf"
+
+/*
+ * The following will be used for determining/using access control
+ * policy.
+ */
+#define USE_SCM            0 /* use scm call to determine policy */
+#define OVERRIDE_SCM_TRUE  1 /* override scm call with true */
+#define OVERRIDE_SCM_FALSE 2 /* override scm call with false */
+
+#define SD_ENABLED  0 /* secure debug enabled. */
+#define SD_DISABLED 1 /* secure debug disabled. */
+
+#define IPA_MEM_INIT_VAL 0xFFFFFFFF
+
+#ifdef CONFIG_COMPAT
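+/* 32-bit ioctl variants for 32-bit userspace on a 64-bit kernel */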
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_INIT_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_TABLE_DMA_CMD, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_NAT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_IPV6CT_TABLE, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_IN_SRAM_INFO32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
+				compat_uptr_t)
+#define IPA_IOC_APP_CLOCK_VOTE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_APP_CLOCK_VOTE, \
+				compat_uptr_t)
+#endif /* #ifdef CONFIG_COMPAT */
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+
+#define MBOX_TOUT_MS 100
+
+/* miscellaneous for rmnet_ipa and qmi_service */
+enum ipa_type_mode {
+	IPA_HW_TYPE,
+	PLATFORM_TYPE,
+	IPA3_HW_MODE,
+};
+
+enum ipa_flag {
+	IPA_ENDP_DELAY_WA_EN,
+	IPA_HW_STATS_EN,
+	IPA_MHI_EN,
+	IPA_FLTRT_NOT_HASHABLE_EN,
+};
+
+enum ipa_icc_level {
+	IPA_ICC_NONE,
+	IPA_ICC_SVS2,
+	IPA_ICC_SVS,
+	IPA_ICC_NOMINAL,
+	IPA_ICC_TURBO,
+	IPA_ICC_LVL_MAX,
+};
+
+enum ipa_icc_path {
+	IPA_ICC_IPA_TO_LLCC,
+	IPA_ICC_LLCC_TO_EBIL,
+	IPA_ICC_IPA_TO_IMEM,
+	IPA_ICC_APSS_TO_IPA,
+	IPA_ICC_PATH_MAX,
+};
+
+enum ipa_icc_type {
+	IPA_ICC_AB,
+	IPA_ICC_IB,
+	IPA_ICC_TYPE_MAX,
+};
+
+#define IPA_ICC_MAX (IPA_ICC_PATH_MAX*IPA_ICC_TYPE_MAX)
+
+struct ipa3_active_client_htable_entry {
+	struct hlist_node list;
+	char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	int count;
+	enum ipa_active_client_log_type type;
+};
+
+struct ipa3_active_clients_log_ctx {
+	spinlock_t lock;
+	char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+	int log_head;
+	int log_tail;
+	bool log_rdy;
+	struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+struct ipa3_client_names {
+	enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+	int length;
+};
+
+struct ipa_smmu_cb_ctx {
+	bool valid;
+	struct device *dev;
+	struct iommu_domain *iommu_domain;
+	unsigned long next_addr;
+	u32 va_start;
+	u32 va_size;
+	u32 va_end;
+	bool shared;
+	bool is_cache_coherent;
+};
+
+/**
+ * struct ipa_flt_rule_add_i - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add_i {
+	u8 at_rear;
+	u32 flt_rule_hdl;
+	int status;
+	struct ipa_flt_rule_i rule;
+};
+
+/**
+ * struct ipa_flt_rule_mdfy_i - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be modified
+ * @rule_hdl: handle to rule
+ * @status:	output parameter, status of filtering rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy_i {
+	u32 rule_hdl;
+	int status;
+	struct ipa_flt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_add_i - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add_i {
+	u8 at_rear;
+	u32 rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_mdfy_i - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be modified
+ * @rt_rule_hdl: handle to the rule to be modified
+ * @status:	output parameter, status of routing rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy_i {
+	u32 rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_i rule;
+};
+
+/**
+ * struct ipa_rt_rule_add_ext_i - routing rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext_i {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+	struct ipa_rt_rule_i rule;
+};
+
+/**
+ * struct ipa3_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules at the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ * @cnt_idx: stats counter index
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa3_flt_entry {
+	struct list_head link;
+	u32 cookie;
+	struct ipa_flt_rule_i rule;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+	u8 cnt_idx;
+	bool ipacm_installed;
+};
+
+/**
+ * struct ipa3_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ * @rule_ids: common idr structure that holds the rule_id for each rule
+ */
+struct ipa3_rt_tbl {
+	struct list_head link;
+	u32 cookie;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa3_rt_tbl_set *set;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	int id;
+	struct idr *rule_ids;
+};
+
+/**
+ * struct ipa3_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and pointed to by proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ * @user_deleted: is the header deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa3_hdr_entry {
+	struct list_head link;
+	u32 cookie;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type type;
+	u8 is_partial;
+	bool is_hdr_proc_ctx;
+	dma_addr_t phys_base;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 ref_cnt;
+	int id;
+	u8 is_eth2_ofst_valid;
+	u16 eth2_ofst;
+	bool user_deleted;
+	bool ipacm_installed;
+};
+
+/**
+ * struct ipa3_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa3_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_offset_entry - IPA proc ctx header offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa3_hdr_proc_ctx_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+	bool ipacm_installed;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: header processing context type
+ * @l2tp_params: L2TP parameters
+ * @generic_params: generic proc_ctx params
+ * @offset_entry: entry's offset
+ * @hdr: the header
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: processing context header entry id
+ * @user_deleted: is the hdr processing context deleted by the user?
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa3_hdr_proc_ctx_entry {
+	struct list_head link;
+	u32 cookie;
+	enum ipa_hdr_proc_type type;
+	struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+	struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
+	struct ipa3_hdr_entry *hdr;
+	u32 ref_cnt;
+	int id;
+	bool user_deleted;
+	bool ipacm_installed;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: the last processing context header index
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa3_hdr_proc_ctx_tbl {
+	struct list_head head_proc_ctx_entry_list;
+	struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	u32 proc_ctx_cnt;
+	u32 end;
+	u32 start_offset;
+};
+
+/**
+ * struct ipa3_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter tables
+ * @end: the last header index
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ * @rule_ids: common idr structure that holds the rule_id for each rule
+ */
+struct ipa3_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	bool sticky_rear;
+	struct idr *rule_ids;
+};
+
+/**
+ * struct ipa3_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the table
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules at the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ * @rule_id_valid: indicate if rule_id is valid or not
+ * @cnt_idx: stats counter index
+ * @ipacm_installed: indicate if installed by ipacm
+ */
+struct ipa3_rt_entry {
+	struct list_head link;
+	u32 cookie;
+	struct ipa_rt_rule_i rule;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_hdr_entry *hdr;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+	u16 rule_id_valid;
+	u8 cnt_idx;
+	bool ipacm_installed;
+};
+
+/**
+ * struct ipa3_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+	struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Packet count that are not recycled
+ * @rx_dp_fail: Packets failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa3_wlan_stats {
+	u32 rx_pkts_rcvd;
+	u32 rx_pkts_status_rcvd;
+	u32 rx_hd_processed;
+	u32 rx_hd_reply;
+	u32 rx_hd_rcvd;
+	u32 rx_pkt_leak;
+	u32 rx_dp_fail;
+	u32 tx_pkts_rcvd;
+	u32 tx_pkts_sent;
+	u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa3_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa3_wlan_comm_memb {
+	spinlock_t wlan_spinlock;
+	spinlock_t ipa_tx_mul_spinlock;
+	u32 wlan_comm_total_cnt;
+	u32 wlan_comm_free_cnt;
+	u32 total_tx_pkts_freed;
+	struct list_head wlan_comm_desc_list;
+	atomic_t active_clnt_cnt;
+};
+
+struct ipa_gsi_ep_mem_info {
+	u16 evt_ring_len;
+	u64 evt_ring_base_addr;
+	void *evt_ring_base_vaddr;
+	u16 chan_ring_len;
+	u64 chan_ring_base_addr;
+	void *chan_ring_base_vaddr;
+};
+
+struct ipa3_status_stats {
+	struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+	unsigned int curr;
+};
+
+/**
+ * struct ipa3_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @gsi_chan_hdl: EP's GSI channel handle
+ * @gsi_evt_ring_hdl: EP's GSI channel event ring handle
+ * @gsi_mem_info: EP's GSI channel rings info
+ * @chan_scratch: EP's GSI channel scratch info
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @priv: user provided information which will be forwarded once the user is
+ *        notified of new data
+ * @client_notify: user provided CB for EP event notification; the event is
+ *                 data received.
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether the QMI request to enable the clear
+ *					data path has been sent.
+ * @client_lock_unlock: callback function to take mutex lock/unlock for USB
+ *				clients
+ */
+struct ipa3_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	unsigned long gsi_chan_hdl;
+	unsigned long gsi_evt_ring_hdl;
+	struct ipa_gsi_ep_mem_info gsi_mem_info;
+	union __packed gsi_channel_scratch chan_scratch;
+	struct gsi_chan_xfer_notify xfer_notify;
+	bool xfer_notify_valid;
+	struct ipa_ep_cfg cfg;
+	struct ipa_ep_cfg_holb holb;
+	struct ipahal_reg_ep_cfg_status status;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	void *priv;
+	void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	atomic_t avail_fifo_desc;
+	u32 dflt_flt4_rule_hdl;
+	u32 dflt_flt6_rule_hdl;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct ipa3_wlan_stats wstats;
+	u32 uc_offload_state;
+	u32 gsi_offload_state;
+	atomic_t disconnect_in_progress;
+	u32 qmi_request_sent;
+	u32 eot_in_poll_err;
+	bool ep_delay_set;
+
+	/* sys MUST be the last element of this struct */
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa_request_gsi_channel_params - xDCI channel related properties
+ *
+ * @ipa_ep_cfg:          IPA EP configuration
+ * @client:              type of "client"
+ * @priv:                callback cookie
+ * @notify:              callback
+ *           priv - callback cookie; evt - type of event; data - data relevant
+ *           to the event. May not be valid. See event_type enum for valid
+ *           cases.
+ * @skip_ep_cfg:         boolean field that determines if EP should be
+ *                       configured by IPA driver
+ * @keep_ipa_awake:      when true, IPA will not be clock gated
+ * @evt_ring_params:     parameters for the channel's event ring
+ * @evt_scratch:         parameters for the channel's event ring scratch
+ * @chan_params:         parameters for the channel
+ * @chan_scratch:        parameters for the channel's scratch
+ *
+ */
+struct ipa_request_gsi_channel_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	void *priv;
+	ipa_notify_cb notify;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct gsi_evt_ring_props evt_ring_params;
+	union __packed gsi_evt_scratch evt_scratch;
+	struct gsi_chan_props chan_params;
+	union __packed gsi_channel_scratch chan_scratch;
+};
+
+enum ipa3_sys_pipe_policy {
+	IPA_POLICY_INTR_MODE,
+	IPA_POLICY_NOINTR_MODE,
+	IPA_POLICY_INTR_POLL_MODE,
+};
+
+struct ipa3_repl_ctx {
+	struct ipa3_rx_pkt_wrapper **cache;
+	atomic_t head_idx;
+	atomic_t tail_idx;
+	u32 capacity;
+	atomic_t pending;
+};
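+
+/*
+ * Illustrative sketch (not part of the driver): ipa3_repl_ctx is a circular
+ * cache of pre-allocated Rx wrappers. Assuming head_idx is the producer
+ * index and tail_idx the consumer index, both wrapping at @capacity, the
+ * number of cached entries can be derived as below.
+ */
+static inline u32 ipa3_repl_cached_cnt(struct ipa3_repl_ctx *repl)
+{
+	u32 head = atomic_read(&repl->head_idx);
+	u32 tail = atomic_read(&repl->tail_idx);
+
+	/* (head - tail) mod capacity is the current fill level */
+	return (head - tail + repl->capacity) % repl->capacity;
+}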
+
+/**
+ * struct ipa3_sys_context - IPA GPI pipes context
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @ep: IPA EP context
+ * @xmit_eot_cnt: count of pending eot for tasklet to process
+ * @tasklet: tasklet for eot write_done handle (tx_complete)
+ *
+ * IPA context specific to the GPI pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa3_sys_context {
+	u32 len;
+	atomic_t curr_polling_state;
+	atomic_t workqueue_flushed;
+	struct delayed_work switch_to_intr_work;
+	enum ipa3_sys_pipe_policy policy;
+	bool use_comm_evt_ring;
+	bool nop_pending;
+	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
+	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+	void (*free_skb)(struct sk_buff *skb);
+	void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rx_pkt);
+	u32 rx_buff_sz;
+	u32 rx_pool_sz;
+	struct sk_buff *prev_skb;
+	unsigned int len_rem;
+	unsigned int len_pad;
+	unsigned int len_partial;
+	bool drop_packet;
+	struct work_struct work;
+	struct delayed_work replenish_rx_work;
+	struct work_struct repl_work;
+	void (*repl_hdlr)(struct ipa3_sys_context *sys);
+	struct ipa3_repl_ctx *repl;
+	struct ipa3_repl_ctx *page_recycle_repl;
+	u32 pkt_sent;
+	struct napi_struct *napi_obj;
+	struct list_head pending_pkts[GSI_VEID_MAX];
+	atomic_t xmit_eot_cnt;
+	struct tasklet_struct tasklet;
+	bool skip_eot;
+	u32 eob_drop_cnt;
+
+	/* ordering is important - mutable fields go above */
+	struct ipa3_ep_context *ep;
+	struct list_head head_desc_list;
+	struct list_head rcycl_list;
+	spinlock_t spinlock;
+	struct hrtimer db_timer;
+	struct workqueue_struct *wq;
+	struct workqueue_struct *repl_wq;
+	struct ipa3_status_stats *status_stat;
+	u32 pm_hdl;
+	/* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa3_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but no CD
+ */
+enum ipa3_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_DATA_DESC_SKB_PAGED,
+	IPA_IMM_CMD_DESC,
+};
+
+/**
+ * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: when true, the buffer will not be DMA unmapped
+ * @xmit_done: flag to indicate the last desc got tx complete on each ieob
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa3_tx_pkt_wrapper {
+	enum ipa3_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct list_head link;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct ipa3_sys_context *sys;
+	u32 cnt;
+	void *bounce;
+	bool no_unmap_dma;
+	bool xmit_done;
+};
+
+/**
+ * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper(sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfers descriptors.
+ */
+struct ipa3_dma_xfer_wrapper {
+	u64 phys_addr_src;
+	u64 phys_addr_dest;
+	u16 len;
+	struct list_head link;
+	struct completion xfer_done;
+	void (*callback)(void *user1);
+	void *user1;
+};
+
+/**
+ * struct ipa3_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * @frag: points to paged fragment
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @is_tag_status: flag for IP_PACKET_TAG_STATUS imd cmd
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ * @skip_db_ring: specifies whether GSI doorbell should not be rung
+ */
+struct ipa3_desc {
+	enum ipa3_desc_type type;
+	void *pyld;
+	skb_frag_t *frag;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+	bool is_tag_status;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct completion xfer_done;
+	bool skip_db_ring;
+};
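+
+/*
+ * Illustrative sketch (not part of the driver): minimal population of an
+ * ipa3_desc for the linear part of an skb before handing it to the send
+ * path. The callback/cookie pairing mirrors the fields documented above;
+ * the helper name is ours.
+ */
+static inline void ipa3_desc_fill_skb(struct ipa3_desc *desc,
+	struct sk_buff *skb, void (*cb)(void *user1, int user2), void *user1)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->type = IPA_DATA_DESC_SKB;
+	desc->pyld = skb->data;	/* flat buffer only; frags use ->frag */
+	desc->len = skb_headlen(skb);
+	desc->callback = cb;
+	desc->user1 = user1;
+	desc->user2 = 0;
+}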
+
+/**
+ * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @link: linked to the Rx packets on that pipe
+ * @data: skb based Rx packet info
+ * @page_data: page based Rx packet info
+ * @len: how many bytes are copied into skb's flat buffer
+ * @work: work struct for deferred Rx processing
+ * @sys: corresponding IPA sys context
+ */
+struct ipa3_rx_pkt_wrapper {
+	struct list_head link;
+	union {
+		struct ipa_rx_data data;
+		struct ipa_rx_page_data page_data;
+	};
+	u32 len;
+	struct work_struct work;
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa3_nat_ipv6ct_tmp_mem - NAT/IPv6CT temporary memory
+ *
+ * In case the NAT/IPv6CT tables are destroyed, the HW is provided with
+ * this temporary memory
+ *
+ * @vaddr: the address of the temporary memory
+ * @dma_handle: the handle of the temporary memory
+ */
+struct ipa3_nat_ipv6ct_tmp_mem {
+	void *vaddr;
+	dma_addr_t dma_handle;
+};
+
+/**
+ * struct ipa3_nat_ipv6ct_common_mem - IPA NAT/IPv6CT memory device
+ * @name: the device name
+ * @lock: memory mutex
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @is_nat_mem: is the memory for v4 NAT
+ * @is_ipv6ct_mem: is the memory for IPv6 connection tracking
+ * @is_dev_init: flag indicating if device is initialized
+ * @is_hw_init: flag indicating if the corresponding HW is initialized
+ * @is_mapped: flag indicating if memory is mapped
+ * @phys_mem_size: the physical size in the shared memory
+ * @phys_mem_ofst: the offset in the shared memory
+ * @table_alloc_size: size (bytes) of table
+ * @vaddr: the virtual address in the system memory
+ * @dma_handle: the system memory DMA handle
+ * @base_address: table virtual address
+ * @base_table_addr: base table address
+ * @expansion_table_addr: expansion table address
+ * @table_entries: num of entries in the base table
+ * @expn_table_entries: num of entries in the expansion table
+ * @tmp_mem: temporary memory used to always provide HW with a legal memory
+ */
+struct ipa3_nat_ipv6ct_common_mem {
+	char           name[IPA_DEV_NAME_MAX_LEN];
+	struct mutex   lock;
+	struct class  *class;
+	struct device *dev;
+	struct cdev    cdev;
+	dev_t          dev_num;
+
+	bool           is_nat_mem;
+	bool           is_ipv6ct_mem;
+
+	bool           is_dev_init;
+	bool           is_hw_init;
+	bool           is_mapped;
+
+	u32            phys_mem_size;
+	u32            phys_mem_ofst;
+	size_t         table_alloc_size;
+
+	void          *vaddr;
+	dma_addr_t     dma_handle;
+	void          *base_address;
+	char          *base_table_addr;
+	char          *expansion_table_addr;
+	u32            table_entries;
+	u32            expn_table_entries;
+
+	struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+};
+
+/**
+ * struct ipa3_nat_mem_loc_data - memory specific info per table memory type
+ * @is_mapped: has the memory been mapped?
+ * @io_vaddr: the virtual address in the sram memory
+ * @vaddr: the virtual address in the system memory
+ * @dma_handle: the system memory DMA handle
+ * @phys_addr: physical sram memory location
+ * @table_alloc_size: size (bytes) of table
+ * @table_entries: number of entries in table
+ * @expn_table_entries: number of entries in expansion table
+ * @base_address: same as vaddr above
+ * @base_table_addr: base table address
+ * @expansion_table_addr: base table's expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index table's expansion table address
+ */
+struct ipa3_nat_mem_loc_data {
+	bool          is_mapped;
+
+	void __iomem *io_vaddr;
+
+	void         *vaddr;
+	dma_addr_t    dma_handle;
+
+	unsigned long phys_addr;
+
+	size_t        table_alloc_size;
+
+	u32           table_entries;
+	u32           expn_table_entries;
+
+	void         *base_address;
+
+	char         *base_table_addr;
+	char         *expansion_table_addr;
+
+	char         *index_table_addr;
+	char         *index_table_expansion_addr;
+};
+
+/**
+ * struct ipa3_nat_mem - IPA NAT memory description
+ * @dev: the memory device structure
+ * @public_ip_addr: ip address of nat table
+ * @pdn_mem: pdn config table SW cache memory structure
+ * @is_tmp_mem_allocated: indicate if tmp mem has been allocated
+ * @last_alloc_loc: last memory type allocated
+ * @active_table: which table memory type is currently active
+ * @switch2ddr_cnt: how many times we've switched focus to ddr
+ * @switch2sram_cnt: how many times we've switched focus to sram
+ * @ddr_in_use: is there table in ddr
+ * @sram_in_use: is there table in sram
+ * @mem_loc: memory specific info per table memory type
+ */
+struct ipa3_nat_mem {
+	struct ipa3_nat_ipv6ct_common_mem dev; /* this item must be first */
+
+	u32                          public_ip_addr;
+	struct ipa_mem_buffer        pdn_mem;
+
+	bool                         is_tmp_mem_allocated;
+
+	enum ipa3_nat_mem_in         last_alloc_loc;
+
+	enum ipa3_nat_mem_in         active_table;
+	u32                          switch2ddr_cnt;
+	u32                          switch2sram_cnt;
+
+	bool                         ddr_in_use;
+	bool                         sram_in_use;
+
+	struct ipa3_nat_mem_loc_data mem_loc[IPA_NAT_MEM_IN_MAX];
+};
+
+/**
+ * struct ipa3_ipv6ct_mem - IPA IPv6 connection tracking memory description
+ * @dev: the memory device structure
+ */
+struct ipa3_ipv6ct_mem {
+	struct ipa3_nat_ipv6ct_common_mem dev; /* this item must be first */
+};
+
+/**
+ * enum ipa3_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over PCIe bridge
+ * @IPA_HW_MODE_EMULATION: IPA emulation hardware
+ */
+enum ipa3_hw_mode {
+	IPA_HW_MODE_NORMAL    = 0,
+	IPA_HW_MODE_VIRTUAL   = 1,
+	IPA_HW_MODE_PCIE      = 2,
+	IPA_HW_MODE_EMULATION = 3,
+};
+
+/*
+ * enum ipa3_platform_type - Platform type
+ * @IPA_PLAT_TYPE_MDM: MDM platform (usually 32bit single core CPU platform)
+ * @IPA_PLAT_TYPE_MSM: MSM SOC platform (usually 64bit multi-core platform)
+ * @IPA_PLAT_TYPE_APQ: Similar to MSM but without modem
+ */
+enum ipa3_platform_type {
+	IPA_PLAT_TYPE_MDM	= 0,
+	IPA_PLAT_TYPE_MSM	= 1,
+	IPA_PLAT_TYPE_APQ	= 2,
+};
+
+enum ipa3_config_this_ep {
+	IPA_CONFIGURE_THIS_EP,
+	IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa3_page_recycle_stats {
+	u64 total_replenished;
+	u64 tmp_alloc;
+};
+struct ipa3_stats {
+	u32 tx_sw_pkts;
+	u32 tx_hw_pkts;
+	u32 rx_pkts;
+	u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX];
+	u32 rx_repl_repost;
+	u32 tx_pkts_compl;
+	u32 rx_q_len;
+	u32 msg_w[IPA_EVENT_MAX_NUM];
+	u32 msg_r[IPA_EVENT_MAX_NUM];
+	u32 stat_compl;
+	u32 aggr_close;
+	u32 wan_aggr_close;
+	u32 wan_rx_empty;
+	u32 wan_repl_rx_empty;
+	u32 lan_rx_empty;
+	u32 lan_repl_rx_empty;
+	u32 flow_enable;
+	u32 flow_disable;
+	u32 tx_non_linear;
+	struct ipa3_page_recycle_stats page_recycle_stats[2];
+};
+
+/* offset for each stats */
+#define IPA3_UC_DEBUG_STATS_RINGFULL_OFF (0)
+#define IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF (4)
+#define IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF (8)
+#define IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF (12)
+#define IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF (16)
+#define IPA3_UC_DEBUG_STATS_OFF (20)
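+
+/*
+ * Illustrative sketch (not part of the driver): given the per-ring layout
+ * implied by the offsets above (IPA3_UC_DEBUG_STATS_OFF bytes per ring),
+ * a single 32-bit counter for ring 'n' could be read from the mapped
+ * debug stats region as below. The per-ring stride assumption is ours.
+ */
+static inline u32 ipa3_uc_dbg_read_ringfull(void __iomem *mmio, u32 n)
+{
+	return ioread32(mmio + n * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+}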
+
+/**
+ * struct ipa3_uc_dbg_stats - uC dbg stats for offloading
+ * protocols
+ * @uc_dbg_stats_ofst: offset to SRAM base
+ * @uc_dbg_stats_size: stats size for all channels
+ * @uc_dbg_stats_mmio: mmio mapped stats address
+ */
+struct ipa3_uc_dbg_stats {
+	u32 uc_dbg_stats_ofst;
+	u16 uc_dbg_stats_size;
+	void __iomem *uc_dbg_stats_mmio;
+};
+
+struct ipa3_active_clients {
+	struct mutex mutex;
+	atomic_t cnt;
+	int bus_vote_idx;
+};
+
+struct ipa3_wakelock_ref_cnt {
+	spinlock_t spinlock;
+	int cnt;
+};
+
+struct ipa3_tag_completion {
+	struct completion comp;
+	atomic_t cnt;
+};
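+
+/*
+ * Illustrative sketch (not part of the driver): ipa3_tag_completion pairs a
+ * completion with a reference count so that several async users can share
+ * one waiter; whoever drops the last reference frees the object. The helper
+ * name is ours.
+ */
+static inline void ipa3_tag_comp_done(struct ipa3_tag_completion *comp)
+{
+	complete(&comp->comp);
+	/* last reference holder frees the object */
+	if (atomic_dec_and_test(&comp->cnt))
+		kfree(comp);
+}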
+
+struct ipa3_controller;
+
+/**
+ * struct ipa3_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa3_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa3_uc_hdlrs {
+	void (*ipa_uc_loaded_hdlr)(void);
+
+	void (*ipa_uc_event_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+
+	int (*ipa3_uc_response_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+		u32 *uc_status);
+
+	void (*ipa_uc_event_log_info_hdlr)
+		(struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa3_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ *	failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ *	in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ *	IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ *	QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ *	IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ *	entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER: If set, do not start the OOB timer
+ */
+enum ipa3_hw_flags {
+	IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE	= 0x01,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR		= 0x02,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP	= 0x04,
+	IPA_HW_FLAG_WORK_OVER_DDR			= 0x08,
+	IPA_HW_FLAG_NO_REPORT_OOB			= 0x10,
+	IPA_HW_FLAG_NO_REPORT_DB_MODE			= 0x20,
+	IPA_HW_FLAG_NO_START_OOB_TIMER			= 0x40
+};
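+
+/*
+ * Illustrative sketch (not part of the driver): the HW flags above form a
+ * bitmask, so a behavior word is composed by OR-ing values and queried
+ * with a simple AND test.
+ */
+static inline bool ipa3_hw_flag_is_set(u32 hw_flags, enum ipa3_hw_flags f)
+{
+	/* e.g. hw_flags = IPA_HW_FLAG_WORK_OVER_DDR | IPA_HW_FLAG_NO_REPORT_OOB */
+	return !!(hw_flags & f);
+}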
+
+/**
+ * struct ipa3_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_spinlock: same as uc_lock but for irq contexts
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_error_type: error type from uC error event
+ * @uc_error_timestamp: tag timer sampled after uC crashed
+ */
+struct ipa3_uc_ctx {
+	bool uc_inited;
+	bool uc_loaded;
+	bool uc_failed;
+	struct mutex uc_lock;
+	spinlock_t uc_spinlock;
+	struct completion uc_completion;
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+	u32 uc_event_top_ofst;
+	u32 pending_cmd;
+	u32 uc_status;
+	u32 uc_error_type;
+	u32 uc_error_timestamp;
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+	bool uc_event_ring_valid;
+	struct ipa_mem_buffer event_ring;
+	u32 ering_wp_local;
+	u32 ering_rp_local;
+	u32 ering_wp;
+	u32 ering_rp;
+};
+
+/**
+ * struct ipa3_uc_wdi_ctx - uC WDI context
+ * @wdi_uc_stats_ofst: WDI stats offset in SRAM
+ * @wdi_uc_stats_mmio: mapped WDI stats structure
+ * @priv: client private data passed to @uc_ready_cb
+ * @uc_ready_cb: uC ready callback
+ * @stats_notify: AP+STA stats update callback
+ */
+struct ipa3_uc_wdi_ctx {
+	/* WDI specific fields */
+	u32 wdi_uc_stats_ofst;
+	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+	/* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb stats_notify;
+#endif
+};
+
+/**
+ * struct ipa3_uc_wigig_ctx
+ * @priv: wigig driver private data
+ * @uc_ready_cb: wigig driver uc ready callback
+ * @int_notify: wigig driver misc interrupt callback
+ */
+struct ipa3_uc_wigig_ctx {
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+	ipa_wigig_misc_int_cb misc_notify_cb;
+};
+
+/**
+ * struct ipa3_wdi2_ctx - IPA wdi2 context
+ */
+struct ipa3_wdi2_ctx {
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_wdi3_ctx - IPA wdi3 context
+ */
+struct ipa3_wdi3_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_usb_ctx - IPA usb context
+ */
+struct ipa3_usb_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_mhip_ctx - IPA mhip context
+ */
+struct ipa3_mhip_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+/**
+ * struct ipa3_aqc_ctx - IPA aqc context
+ */
+struct ipa3_aqc_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
+
+/**
+ * struct ipa3_transport_pm - transport power management related members
+ * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
+ */
+struct ipa3_transport_pm {
+	atomic_t dec_clients;
+	atomic_t eot_activity;
+	struct mutex transport_pm_mutex;
+};
+
+/**
+ * struct ipa3cm_client_info - the client-info indicated from IPACM
+ * @client_enum: the enum to indicate tether-client
+ * @uplink: the bool to indicate pipe for uplink
+ */
+struct ipa3cm_client_info {
+	enum ipacm_client_enum client_enum;
+	bool uplink;
+};
+
+/**
+ * struct ipacm_fnr_info - the fnr-info indicated from IPACM
+ * @valid: indicates whether the fnr info is valid
+ * @hw_counter_offset: offset of the HW counters
+ * @sw_counter_offset: offset of the SW counters
+ */
+struct ipacm_fnr_info {
+	bool valid;
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
+struct ipa3_smp2p_info {
+	u32 out_base_id;
+	u32 in_base_id;
+	bool ipa_clk_on;
+	bool res_sent;
+	unsigned int smem_bit;
+	struct qcom_smem_state *smem_state;
+};
+
+/**
+ * struct ipa3_ready_cb_info - A list of all the registrations
+ *  for an indication of IPA driver readiness
+ *
+ * @link: linked list link
+ * @ready_cb: callback
+ * @user_data: User data
+ *
+ */
+struct ipa3_ready_cb_info {
+	struct list_head link;
+	ipa_ready_cb ready_cb;
+	void *user_data;
+};
+
+struct ipa_dma_task_info {
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+};
+
+struct ipa_quota_stats {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u32 num_ipv4_pkts;
+	u32 num_ipv6_pkts;
+};
+
+struct ipa_quota_stats_all {
+	struct ipa_quota_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_drop_stats {
+	u32 drop_packet_cnt;
+	u32 drop_byte_cnt;
+};
+
+struct ipa_drop_stats_all {
+	struct ipa_drop_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_quota {
+	struct ipahal_stats_init_quota init;
+	struct ipa_quota_stats_all stats;
+};
+
+struct ipa_hw_stats_teth {
+	struct ipahal_stats_init_tethering init;
+	struct ipa_quota_stats_all prod_stats_sum[IPA_CLIENT_MAX];
+	struct ipa_quota_stats_all prod_stats[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_flt_rt {
+	struct ipahal_stats_init_flt_rt flt_v4_init;
+	struct ipahal_stats_init_flt_rt flt_v6_init;
+	struct ipahal_stats_init_flt_rt rt_v4_init;
+	struct ipahal_stats_init_flt_rt rt_v6_init;
+};
+
+struct ipa_hw_stats_drop {
+	struct ipahal_stats_init_drop init;
+	struct ipa_drop_stats_all stats;
+};
+
+struct ipa_hw_stats {
+	bool enabled;
+	struct ipa_hw_stats_quota quota;
+	struct ipa_hw_stats_teth teth;
+	struct ipa_hw_stats_flt_rt flt_rt;
+	struct ipa_hw_stats_drop drop;
+};
+
+struct ipa_cne_evt {
+	struct ipa_wan_msg wan_msg;
+	struct ipa_msg_meta msg_meta;
+};
+
+enum ipa_smmu_cb_type {
+	IPA_SMMU_CB_AP,
+	IPA_SMMU_CB_WLAN,
+	IPA_SMMU_CB_UC,
+	IPA_SMMU_CB_11AD,
+	IPA_SMMU_CB_MAX
+};
+
+#define VALID_IPA_SMMU_CB_TYPE(t) \
+	((t) >= IPA_SMMU_CB_AP && (t) < IPA_SMMU_CB_MAX)
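+
+/*
+ * Illustrative sketch (not part of the driver): VALID_IPA_SMMU_CB_TYPE is
+ * meant to guard per-context-bank array lookups, e.g. before indexing an
+ * array dimensioned by IPA_SMMU_CB_MAX (such as s1_bypass_arr below).
+ */
+static inline bool ipa3_smmu_cb_bypassed(const bool *s1_bypass_arr,
+	enum ipa_smmu_cb_type cb)
+{
+	if (!VALID_IPA_SMMU_CB_TYPE(cb))
+		return false;
+	return s1_bypass_arr[cb];
+}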
+
+enum ipa_client_cb_type {
+	IPA_USB_CLNT,
+	IPA_MHI_CLNT,
+	IPA_MAX_CLNT
+};
+
+/**
+ * struct ipa_flt_rt_counter - IPA flt rt counters management
+ * @hdl: idr structure to manage hdl per request
+ * @used_hw: boolean array to track used hw counters
+ * @used_sw: boolean array to track used sw counters
+ * @hdl_lock: spinlock for flt_rt handle
+ */
+struct ipa_flt_rt_counter {
+	struct idr hdl;
+	bool used_hw[IPA_FLT_RT_HW_COUNTER];
+	bool used_sw[IPA_FLT_RT_SW_COUNTER];
+	spinlock_t hdl_lock;
+};
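+
+/*
+ * Illustrative sketch (not part of the driver): handles in @hdl are managed
+ * with the kernel idr under @hdl_lock. A request object would be registered
+ * roughly as follows (GFP_ATOMIC because the spinlock is held).
+ */
+static inline int ipa3_fnr_alloc_hdl(struct ipa_flt_rt_counter *c, void *req)
+{
+	int id;
+
+	spin_lock(&c->hdl_lock);
+	id = idr_alloc(&c->hdl, req, 1, 0, GFP_ATOMIC);
+	spin_unlock(&c->hdl_lock);
+	return id;	/* negative errno on failure */
+}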
+
+/**
+ * struct ipa3_char_device_context - IPA character device
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ */
+struct ipa3_char_device_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+};
+
+struct ipa3_pc_mbox_data {
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox;
+};
+
+enum ipa_fw_load_state {
+	IPA_FW_LOAD_STATE_INIT,
+	IPA_FW_LOAD_STATE_FWFILE_READY,
+	IPA_FW_LOAD_STATE_SMMU_DONE,
+	IPA_FW_LOAD_STATE_LOAD_READY,
+	IPA_FW_LOAD_STATE_LOADED,
+};
+
+enum ipa_fw_load_event {
+	IPA_FW_LOAD_EVNT_FWFILE_READY,
+	IPA_FW_LOAD_EVNT_SMMU_DONE,
+};
+
+struct ipa_fw_load_data {
+	enum ipa_fw_load_state state;
+	struct mutex lock;
+};
+
+struct ipa3_app_clock_vote {
+	struct mutex mutex;
+	u32 cnt;
+};
+
+/**
+ * struct ipa3_context - IPA context
+ * @cdev: cdev context
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across power-save
+ * @ep_flt_bitmap: End-points supporting filtering bitmap
+ * @ep_flt_num: End-points supporting filtering number
+ * @resume_on_connect: resume ep on ipa connect
+ * @flt_tbl: list of all IPA filter tables
+ * @flt_rule_ids: idr structure that holds the rule_id for each rule
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa3_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ *  from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @ipv6ct_mem: IPv6CT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides true-local, false-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_hash_lcl: hashable ip4 rt tables reside true-local, false-system
+ * @ip4_rt_tbl_nhash_lcl: same, for non-hashable ip4 rt tables
+ * @ip6_rt_tbl_hash_lcl: hashable ip6 rt tables reside true-local, false-system
+ * @ip6_rt_tbl_nhash_lcl: same, for non-hashable ip6 rt tables
+ * @ip4_flt_tbl_hash_lcl: hashable ip4 flt tables reside true-local, false-system
+ * @ip4_flt_tbl_nhash_lcl: same, for non-hashable ip4 flt tables
+ * @ip6_flt_tbl_hash_lcl: hashable ip6 flt tables reside true-local, false-system
+ * @ip6_flt_tbl_nhash_lcl: same, for non-hashable ip6 flt tables
+ * @power_mgmt_wq: workqueue for power management
+ * @transport_power_mgmt_wq: workqueue transport related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ *  gating IPA clocks
+ * @transport_pm: transport power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @ipa3_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: IPA HW type (e.g. IPA 1.0, IPA 1.1, etc.)
+ * @ipa3_hw_mode: IPA HW mode (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @logbuf: ipc log buffer for high priority messages
+ * @logbuf_low: ipc log buffer for low priority messages
+ * @ipa_wdi2: using wdi-2.0
+ * @ipa_fltrt_not_hashable: filter/route rules not hashable
+ * @use_64_bit_dma_mask: using 64bits dma mask
+ * @ctrl: holds the core specific operations based on
+ *  core version (vtable like)
+ * @pkt_init_imm_opcode: opcode for IP_PACKET_INIT imm cmd
+ * @enable_clock_scaling: flag indicating whether clock scaling is enabled
+ * @curr_ipa_clk_rate: IPA current clock rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @uc_wigig_ctx: WIGIG specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @apply_rg10_wa: Indicates whether to use register group 10 workaround
+ * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
+ * @w_lock: the wakeup source
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ * @ipa_initialization_complete: Indicates that IPA is fully initialized
+ * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA
+ *  driver is ready/initialized.
+ * @init_completion_obj: Completion object to be used in case IPA driver hasn't
+ *  finished initializing. Example of use - IOCTLs to /dev/ipa
+ * @mhi_evid_limits: MHI event rings start and end ids
+ * @flt_rt_counters: the counters usage info for flt rt stats
+ * @wdi3_ctx: IPA wdi3 context
+ * @gsi_info: channel/protocol info for GSI offloading uC stats
+ * @app_vote: holds userspace application clock vote count
+ * @lan_rx_napi_enable: flag if NAPI is enabled on the LAN dp
+ * @lan_ndev: dummy netdev for LAN rx NAPI
+ * @napi_lan_rx: NAPI object for LAN rx
+ * @ipa_wan_skb_page: page recycling enabled on wwan data path
+ * @icc_num_cases: number of icc scaling levels supported
+ * @icc_num_paths: number of paths icc would vote for bw
+ * @icc_clk: table for icc bw clock values
+ * @coal_cmd_pyld: holds the coalescing close frame command payload
+ *
+ * IPA context - holds all relevant info about the IPA driver and its state
+ */
+struct ipa3_context {
+	struct ipa3_char_device_context cdev;
+	struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES];
+	bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES];
+	u32 ep_flt_bitmap;
+	u32 ep_flt_num;
+	bool resume_on_connect[IPA_CLIENT_MAX];
+	struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+	struct idr flt_rule_ids[IPA_IP_MAX];
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	u32 ipa_wrapper_size;
+	struct ipa3_hdr_tbl hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+	struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *hdr_proc_ctx_cache;
+	struct kmem_cache *hdr_proc_ctx_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	u16 smem_sz;
+	u16 smem_restricted_bytes;
+	u16 smem_reqd_sz;
+	struct ipa3_nat_mem nat_mem;
+	struct ipa3_ipv6ct_mem ipv6ct_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	bool hdr_tbl_lcl;
+	bool hdr_proc_ctx_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer hdr_proc_ctx_mem;
+	bool ip4_rt_tbl_hash_lcl;
+	bool ip4_rt_tbl_nhash_lcl;
+	bool ip6_rt_tbl_hash_lcl;
+	bool ip6_rt_tbl_nhash_lcl;
+	bool ip4_flt_tbl_hash_lcl;
+	bool ip4_flt_tbl_nhash_lcl;
+	bool ip6_flt_tbl_hash_lcl;
+	bool ip6_flt_tbl_nhash_lcl;
+	struct ipa3_active_clients ipa3_active_clients;
+	struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
+	struct workqueue_struct *power_mgmt_wq;
+	struct workqueue_struct *transport_power_mgmt_wq;
+	bool tag_process_before_gating;
+	struct ipa3_transport_pm transport_pm;
+	unsigned long gsi_evt_comm_hdl;
+	u32 gsi_evt_comm_ring_rem;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	spinlock_t disconnect_lock;
+	u8 a5_pipe_index;
+	struct list_head intf_list;
+	struct list_head msg_list;
+	struct list_head pull_msg_list;
+	struct mutex msg_lock;
+	struct list_head msg_wlan_client_list;
+	struct mutex msg_wlan_client_lock;
+	wait_queue_head_t msg_waitq;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	enum ipa3_platform_type platform_type;
+	bool ipa_config_is_mhi;
+	bool use_ipa_teth_bridge;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool ipa_wdi2_over_gsi;
+	bool ipa_wdi3_over_gsi;
+	bool ipa_endp_delay_wa;
+	bool ipa_fltrt_not_hashable;
+	bool use_64_bit_dma_mask;
+	/* featurize if memory footprint becomes a concern */
+	struct ipa3_stats stats;
+	void *smem_pipe_mem;
+	void *logbuf;
+	void *logbuf_low;
+	struct ipa3_controller *ctrl;
+	struct idr ipa_idr;
+	struct platform_device *master_pdev;
+	struct device *pdev;
+	struct device *uc_pdev;
+	spinlock_t idr_lock;
+	u32 enable_clock_scaling;
+	u32 enable_napi_chain;
+	u32 curr_ipa_clk_rate;
+	bool q6_proxy_clk_vote_valid;
+	struct mutex q6_proxy_clk_vote_mutex;
+	u32 q6_proxy_clk_vote_cnt;
+	u32 ipa_num_pipes;
+	dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
+	u32 pkt_init_imm_opcode;
+
+	struct ipa3_wlan_comm_memb wc_memb;
+
+	struct ipa3_uc_ctx uc_ctx;
+
+	struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+	struct ipa3_uc_ntn_ctx uc_ntn_ctx;
+	struct ipa3_uc_wigig_ctx uc_wigig_ctx;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	unsigned long gsi_dev_hdl;
+	u32 ee;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool s1_bypass_arr[IPA_SMMU_CB_MAX];
+	u32 wdi_map_cnt;
+	struct wakeup_source *w_lock;
+	struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
+	/* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+	bool ipa_client_apps_wan_cons_agg_gro;
+	/* M-release support to know client pipes */
+	struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES];
+	bool tethered_flow_control;
+	bool ipa_initialization_complete;
+	struct list_head ipa_ready_cb_list;
+	struct completion init_completion_obj;
+	struct completion uc_loaded_completion_obj;
+	struct ipa3_smp2p_info smp2p_info;
+	u32 mhi_evid_limits[2]; /* start and end values */
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+	struct ipa_dma_task_info dma_task_info;
+	struct ipa_hw_stats hw_stats;
+	struct ipa_flt_rt_counter flt_rt_counters;
+	struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE];
+	int num_ipa_cne_evt_req;
+	struct mutex ipa_cne_evt_lock;
+	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
+	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	bool do_ram_collection_on_crash;
+	u32 secure_debug_check_action;
+	u32 sd_state;
+	void __iomem *reg_collection_base;
+	struct ipa3_wdi2_ctx wdi2_ctx;
+	struct ipa3_pc_mbox_data pc_mbox;
+	struct ipa3_wdi3_ctx wdi3_ctx;
+	struct ipa3_usb_ctx usb_ctx;
+	struct ipa3_mhip_ctx mhip_ctx;
+	struct ipa3_aqc_ctx aqc_ctx;
+	atomic_t ipa_clk_vote;
+
+	int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
+
+	struct ipa_fw_load_data fw_load_data;
+
+	bool (*get_teth_port_state[IPA_MAX_CLNT])(void);
+
+	atomic_t is_ssr;
+	struct IpaHwOffloadStatsAllocCmdData_t
+		gsi_info[IPA_HW_PROTOCOL_MAX];
+	bool ipa_wan_skb_page;
+	struct ipacm_fnr_info fnr_info;
+	/* dummy netdev for lan RX NAPI */
+	bool lan_rx_napi_enable;
+	struct net_device lan_ndev;
+	struct napi_struct napi_lan_rx;
+	u32 icc_num_cases;
+	u32 icc_num_paths;
+	u32 icc_clk[IPA_ICC_LVL_MAX][IPA_ICC_PATH_MAX][IPA_ICC_TYPE_MAX];
+	struct ipahal_imm_cmd_pyld *coal_cmd_pyld;
+	struct ipa3_app_clock_vote app_clock_vote;
+};
+
+struct ipa3_plat_drv_res {
+	bool use_ipa_teth_bridge;
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 transport_mem_base;
+	u32 transport_mem_size;
+	u32 emulator_intcntrlr_mem_base;
+	u32 emulator_intcntrlr_mem_size;
+	u32 emulator_irq;
+	u32 ipa_irq;
+	u32 transport_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	enum ipa3_platform_type platform_type;
+	u32 ee;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool ipa_wdi2_over_gsi;
+	bool ipa_wdi3_over_gsi;
+	bool ipa_fltrt_not_hashable;
+	bool use_64_bit_dma_mask;
+	bool use_bw_vote;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool tethered_flow_control;
+	bool lan_rx_napi_enable;
+	u32 mhi_evid_limits[2]; /* start and end values */
+	bool ipa_mhi_dynamic_config;
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+	struct ipa_pm_init_params pm_init;
+	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	bool do_ram_collection_on_crash;
+	u32 secure_debug_check_action;
+	bool ipa_endp_delay_wa;
+	bool skip_ieob_mask_wa;
+	bool ipa_wan_skb_page;
+	u32 icc_num_cases;
+	u32 icc_num_paths;
+	const char *icc_path_name[IPA_ICC_PATH_MAX];
+	u32 icc_clk_val[IPA_ICC_LVL_MAX][IPA_ICC_MAX];
+};
+
+/**
+ * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS
+ * Order and type of members should not be changed without a suitable change
+ * to DTS file or the code that reads it.
+ *
+ * IPA SRAM memory layout:
+ * +-------------------------+
+ * |    UC MEM               |
+ * +-------------------------+
+ * |    UC INFO              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |  MODEM HDR              |
+ * +-------------------------+
+ * |  APPS HDR (IPA4.5)      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | MODEM PROC CTX          |
+ * +-------------------------+
+ * | APPS PROC CTX           |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * | NAT TABLE (IPA4.5)      |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * | PDN CONFIG              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | QUOTA STATS             |
+ * +-------------------------+
+ * | TETH STATS              |
+ * +-------------------------+
+ * | FnR STATS               |
+ * +-------------------------+
+ * | DROP STATS              |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * | MODEM MEM               |
+ * +-------------------------+
+ * |    Dummy (IPA4.5)       |
+ * +-------------------------+
+ * |    CANARY (IPA4.5)      |
+ * +-------------------------+
+ * | UC DESC RAM (IPA3.5)    |
+ * +-------------------------+
+ */
+struct ipa3_mem_partition {
+	u32 ofst_start;
+	u32 v4_flt_hash_ofst;
+	u32 v4_flt_hash_size;
+	u32 v4_flt_hash_size_ddr;
+	u32 v4_flt_nhash_ofst;
+	u32 v4_flt_nhash_size;
+	u32 v4_flt_nhash_size_ddr;
+	u32 v6_flt_hash_ofst;
+	u32 v6_flt_hash_size;
+	u32 v6_flt_hash_size_ddr;
+	u32 v6_flt_nhash_ofst;
+	u32 v6_flt_nhash_size;
+	u32 v6_flt_nhash_size_ddr;
+	u32 v4_rt_num_index;
+	u32 v4_modem_rt_index_lo;
+	u32 v4_modem_rt_index_hi;
+	u32 v4_apps_rt_index_lo;
+	u32 v4_apps_rt_index_hi;
+	u32 v4_rt_hash_ofst;
+	u32 v4_rt_hash_size;
+	u32 v4_rt_hash_size_ddr;
+	u32 v4_rt_nhash_ofst;
+	u32 v4_rt_nhash_size;
+	u32 v4_rt_nhash_size_ddr;
+	u32 v6_rt_num_index;
+	u32 v6_modem_rt_index_lo;
+	u32 v6_modem_rt_index_hi;
+	u32 v6_apps_rt_index_lo;
+	u32 v6_apps_rt_index_hi;
+	u32 v6_rt_hash_ofst;
+	u32 v6_rt_hash_size;
+	u32 v6_rt_hash_size_ddr;
+	u32 v6_rt_nhash_ofst;
+	u32 v6_rt_nhash_size;
+	u32 v6_rt_nhash_size_ddr;
+	u32 modem_hdr_ofst;
+	u32 modem_hdr_size;
+	u32 apps_hdr_ofst;
+	u32 apps_hdr_size;
+	u32 apps_hdr_size_ddr;
+	u32 modem_hdr_proc_ctx_ofst;
+	u32 modem_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_ofst;
+	u32 apps_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_size_ddr;
+	u32 nat_tbl_ofst;
+	u32 nat_tbl_size;
+	u32 modem_comp_decomp_ofst;
+	u32 modem_comp_decomp_size;
+	u32 modem_ofst;
+	u32 modem_size;
+	u32 apps_v4_flt_hash_ofst;
+	u32 apps_v4_flt_hash_size;
+	u32 apps_v4_flt_nhash_ofst;
+	u32 apps_v4_flt_nhash_size;
+	u32 apps_v6_flt_hash_ofst;
+	u32 apps_v6_flt_hash_size;
+	u32 apps_v6_flt_nhash_ofst;
+	u32 apps_v6_flt_nhash_size;
+	u32 uc_info_ofst;
+	u32 uc_info_size;
+	u32 end_ofst;
+	u32 apps_v4_rt_hash_ofst;
+	u32 apps_v4_rt_hash_size;
+	u32 apps_v4_rt_nhash_ofst;
+	u32 apps_v4_rt_nhash_size;
+	u32 apps_v6_rt_hash_ofst;
+	u32 apps_v6_rt_hash_size;
+	u32 apps_v6_rt_nhash_ofst;
+	u32 apps_v6_rt_nhash_size;
+	u32 uc_descriptor_ram_ofst;
+	u32 uc_descriptor_ram_size;
+	u32 pdn_config_ofst;
+	u32 pdn_config_size;
+	u32 stats_quota_ofst;
+	u32 stats_quota_size;
+	u32 stats_tethering_ofst;
+	u32 stats_tethering_size;
+	u32 stats_fnr_ofst;
+	u32 stats_fnr_size;
+
+	/* Irrelevant starting from IPA4.5 */
+	u32 stats_flt_v4_ofst;
+	u32 stats_flt_v4_size;
+	u32 stats_flt_v6_ofst;
+	u32 stats_flt_v6_size;
+	u32 stats_rt_v4_ofst;
+	u32 stats_rt_v4_size;
+	u32 stats_rt_v6_ofst;
+	u32 stats_rt_v6_size;
+
+	u32 stats_drop_ofst;
+	u32 stats_drop_size;
+};
+
+struct ipa3_controller {
+	struct ipa3_mem_partition *mem_partition;
+	u32 ipa_clk_rate_turbo;
+	u32 ipa_clk_rate_nominal;
+	u32 ipa_clk_rate_svs;
+	u32 ipa_clk_rate_svs2;
+	u32 clock_scaling_bw_threshold_turbo;
+	u32 clock_scaling_bw_threshold_nominal;
+	u32 clock_scaling_bw_threshold_svs;
+	u32 ipa_reg_base_ofst;
+	u32 max_holb_tmr_val;
+	void (*ipa_sram_read_settings)(void);
+	int (*ipa_init_sram)(void);
+	int (*ipa_init_hdr)(void);
+	int (*ipa_init_rt4)(void);
+	int (*ipa_init_rt6)(void);
+	int (*ipa_init_flt4)(void);
+	int (*ipa_init_flt6)(void);
+	int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
+	int (*ipa3_commit_flt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_rt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_hdr)(void);
+	void (*ipa3_enable_clks)(void);
+	void (*ipa3_disable_clks)(void);
+	struct icc_path *icc_path[IPA_ICC_PATH_MAX];
+};
+
+extern struct ipa3_context *ipa3_ctx;
+
+/* public APIs */
+/* Generic GSI channels functions */
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params);
+
+int ipa3_release_gsi_channel(u32 clnt_hdl);
+
+int ipa3_start_gsi_channel(u32 clnt_hdl);
+
+int ipa3_stop_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
+
+/* Specific xDCI channels functions */
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size);
+
+int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+
+int ipa3_xdci_connect(u32 clnt_hdl);
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
+
+void ipa3_xdci_ep_delay_rm(u32 clnt_hdl);
+void ipa3_register_client_callback(int (*client_cb)(bool),
+		bool (*teth_port_state)(void),
+		enum ipa_client_type client_type);
+void ipa3_deregister_client_callback(enum ipa_client_type client_type);
+int ipa3_set_reset_client_prod_pipe_delay(bool set_reset,
+		enum ipa_client_type client);
+int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
+		bool start_chnl);
+void ipa3_client_prod_post_shutdown_cleanup(void);
+
+
+int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
+		enum ipa_client_type client);
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
+
+/*
+ * Remove ep delay
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa3_cfg_ep_conn_track(u32 clnt_hdl,
+	const struct ipa_ep_cfg_conn_track *ep_conn_track);
+
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+		      const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val,
+				struct ipa_ep_cfg_holb *ep_holb);
+
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user);
+
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user);
+
+int ipa3_commit_hdr(void);
+
+int ipa3_reset_hdr(bool user_only);
+
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa3_put_hdr(u32 hdr_hdl);
+
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy);
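+
+/*
+ * Illustrative usage sketch (not part of the driver): adding one header via
+ * ipa3_add_hdr(). The ipa_ioc_add_hdr layout (commit, num_hdrs and the
+ * trailing ipa_hdr_add array) is assumed from the IPA uAPI; the ioc struct
+ * is variable-length, hence the single allocation below.
+ */
+static int example_add_one_hdr(const char *name, const u8 *hdr, u8 hdr_len)
+{
+	struct ipa_ioc_add_hdr *ioc;
+	int ret;
+
+	ioc = kzalloc(sizeof(*ioc) + sizeof(ioc->hdr[0]), GFP_KERNEL);
+	if (!ioc)
+		return -ENOMEM;
+
+	ioc->commit = 1;
+	ioc->num_hdrs = 1;
+	strlcpy(ioc->hdr[0].name, name, sizeof(ioc->hdr[0].name));
+	memcpy(ioc->hdr[0].hdr, hdr, hdr_len);
+	ioc->hdr[0].hdr_len = hdr_len;
+
+	ret = ipa3_add_hdr(ioc);	/* on success hdr[0].hdr_hdl is valid */
+	kfree(ioc);
+	return ret;
+}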
+
+/*
+ * Header Processing Context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
+							bool user_only);
+
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
+	bool by_user);
+
+/*
+ * Routing
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa3_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules);
+
+int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules,
+	bool user_only);
+
+int ipa3_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules,
+	bool user_only);
+
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
+int ipa3_add_rt_rule_ext_v2(struct ipa_ioc_add_rt_rule_ext_v2 *rules);
+
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
+
+int ipa3_add_rt_rule_after_v2(struct ipa_ioc_add_rt_rule_after_v2
+	*rules);
+
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa3_commit_rt(enum ipa_ip_type ip);
+
+int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only);
+
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+int ipa3_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
+
+/*
+ * Filtering
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa3_add_flt_rule_v2(struct ipa_ioc_add_flt_rule_v2 *rules);
+
+int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules,
+	bool user_only);
+
+int ipa3_add_flt_rule_usr_v2(struct ipa_ioc_add_flt_rule_v2 *rules,
+	bool user_only);
+
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
+
+int ipa3_add_flt_rule_after_v2(struct ipa_ioc_add_flt_rule_after_v2
+	*rules);
+
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa3_mdfy_flt_rule_v2(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
+
+int ipa3_commit_flt(enum ipa_ip_type ip);
+
+int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only);
+
+/*
+ * NAT
+ */
+int ipa3_nat_ipv6ct_init_devices(void);
+void ipa3_nat_ipv6ct_destroy_devices(void);
+
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+int ipa3_allocate_nat_table(
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+int ipa3_allocate_ipv6ct_table(
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init);
+
+int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
+int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
+int ipa3_nat_get_sram_info(struct ipa_nat_in_sram_info *info_ptr);
+int ipa3_app_clk_vote(enum ipa_app_clock_vote_type vote_type);
+
+/*
+ * Messaging
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback);
+int ipa3_resend_wlan_msg(void);
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx);
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext);
+int ipa3_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa3_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa3_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * When passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc, not list_head
+ * (see the sketch below)
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
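+
+/*
+ * Illustrative sketch (not part of the driver): per the note above, the
+ * anchor passed to ipa3_tx_dp_mul() is itself a struct ipa_tx_data_desc
+ * whose link head anchors the chain; the anchor carries no payload of its
+ * own. The descriptors d1/d2 are assumed to be filled in by the caller.
+ */
+static void example_tx_two_buffers(struct ipa_tx_data_desc *anchor,
+	struct ipa_tx_data_desc *d1, struct ipa_tx_data_desc *d2)
+{
+	INIT_LIST_HEAD(&anchor->link);
+	list_add_tail(&d1->link, &anchor->link);
+	list_add_tail(&d2->link, &anchor->link);
+
+	/* payload descriptors hang off the anchor's link head */
+	if (ipa3_tx_dp_mul(IPA_CLIENT_WLAN1_CONS, anchor))
+		pr_err("ipa3_tx_dp_mul failed\n");
+}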
+
+void ipa3_free_skb(struct ipa_rx_data *data);
+
+/*
+ * System pipes
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa3_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_transport_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa3_sys_teardown(u32 clnt_hdl);
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl);
+
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa3_enable_wdi_pipe(u32 clnt_hdl);
+int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl);
+int ipa3_disable_wdi_pipe(u32 clnt_hdl);
+int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl);
+int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl);
+int ipa3_resume_wdi_pipe(u32 clnt_hdl);
+int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl);
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
+void ipa3_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa3_get_prot_id(enum ipa_client_type client);
+u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl,
+	struct ipa_ntn_conn_in_params *params);
+int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv);
+void ipa3_ntn_uc_dereg_rdyCB(void);
+int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out,
+	ipa_wdi_meter_notifier_cb wdi_notify);
+int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
+
+int ipa3_conn_wigig_rx_pipe_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent);
+
+int ipa3_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv);
+
+int ipa3_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa);
+
+int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff);
+
+int ipa3_enable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa3_disable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa3_wigig_init_debugfs_i(struct dentry *dent);
+
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To check uC readiness and, only if the uC is not yet ready,
+ * register a uC ready callback
+ */
+int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa3_uc_dereg_rdyCB(void);
+
+int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova);
+
+int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova);
+
+void ipa3_release_wdi3_gsi_smmu_mappings(u8 dir);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+int ipa3_teth_bridge_get_pm_hdl(void);
+
+/*
+ * Tethering client info
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa3_get_client(int pipe_idx);
+
+bool ipa3_get_client_uplink(int pipe_idx);
+
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
+
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw);
+
+/*
+ * IPADMA
+ */
+int ipa3_dma_init(void);
+
+int ipa3_dma_enable(void);
+
+int ipa3_dma_disable(void);
+
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa3_dma_destroy(void);
+
+/*
+ * MHI
+ */
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa3_connect_mhi_pipe(
+		struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client);
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client);
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client);
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client);
+
+/*
+ * mux id
+ */
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+/*
+ * Miscellaneous
+ */
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa3_is_ready(void);
+
+int ipa3_ctx_get_type(enum ipa_type_mode type);
+bool ipa3_ctx_get_flag(enum ipa_flag flag);
+u32 ipa3_ctx_get_num_pipes(void);
+
+void ipa3_proxy_clk_vote(void);
+void ipa3_proxy_clk_unvote(void);
+
+bool ipa3_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx);
+enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx);
+
+void ipa_init_ep_flt_bitmap(void);
+
+bool ipa_is_ep_support_flt(int pipe_idx);
+
+bool ipa3_get_modem_cfg_emb_pipe_flt(void);
+
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
+
+int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
+	struct ipa_smmu_out_params *out);
+
+bool ipa3_get_lan_rx_napi(void);
+
+/* internal functions */
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl);
+
+bool ipa_is_modem_pipe(int pipe_idx);
+
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa_get_ep_group(enum ipa_client_type client);
+
+int ipa3_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+int ipa3_init_hw(void);
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa3_set_single_ndp_per_mbim(bool enable);
+void ipa3_debugfs_init(void);
+void ipa3_debugfs_remove(void);
+
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa3_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
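+/*
+ * Usage sketch (illustrative only; the local "mem" buffer is hypothetical):
+ * dump a DMA buffer when IPA_DEBUG is defined; the macro compiles away to
+ * nothing otherwise.
+ *
+ *	struct ipa_mem_buffer mem;
+ *
+ *	(allocate and fill mem, then:)
+ *	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+ */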
+int ipa3_init_mem_partition(enum ipa_hw_type ipa_hw_type);
+int ipa3_controller_static_bind(struct ipa3_controller *controller,
+		enum ipa_hw_type ipa_hw_type);
+int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout);
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
+int ipa3_cfg_filter(u32 disable);
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa3_context *ipa3_get_ctx(void);
+void ipa3_enable_clks(void);
+void ipa3_disable_clks(void);
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id);
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_dec_client_disable_clks_no_block(
+	struct ipa_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+int ipa3_active_clients_log_print_buffer(char *buf, int size);
+int ipa3_active_clients_log_print_table(char *buf, int size);
+void ipa3_active_clients_log_clear(void);
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev);
+int __ipa3_del_rt_rule(u32 rule_hdl);
+int __ipa3_del_hdr(u32 hdr_hdl, bool by_user);
+int __ipa3_release_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe);
+int _ipa_read_ipahal_regs(void);
+void _ipa_enable_clks_v3_0(void);
+void _ipa_disable_clks_v3_0(void);
+struct device *ipa3_get_dma_dev(void);
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl);
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data);
+
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		 loff_t *f_pos);
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int ipa3_teth_bridge_driver_init(void);
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v3(void);
+int _ipa_init_hdr_v3_0(void);
+int _ipa_init_rt4_v3(void);
+int _ipa_init_rt6_v3(void);
+int _ipa_init_flt4_v3(void);
+int _ipa_init_flt6_v3(void);
+
+int __ipa_commit_flt_v3(enum ipa_ip_type ip);
+int __ipa_commit_rt_v3(enum ipa_ip_type ip);
+
+int __ipa_commit_hdr_v3_0(void);
+void ipa3_skb_recycle(struct sk_buff *skb);
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa3_enable_data_path(u32 clnt_hdl);
+int ipa3_disable_data_path(u32 clnt_hdl);
+int ipa3_disable_gsi_data_path(u32 clnt_hdl);
+int ipa3_alloc_rule_id(struct idr *rule_ids);
+int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter);
+void ipa3_counter_remove_hdl(int hdl);
+void ipa3_counter_id_remove_all(void);
+int ipa3_id_alloc(void *ptr);
+bool ipa3_check_idr_if_freed(void *ptr);
+void *ipa3_id_find(u32 id);
+void ipa3_id_remove(u32 id);
+int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
+	u32 source_pipe_bitmask);
+int ipa3_disable_force_clear(u32 request_id);
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps);
+
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+		const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
+
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa3_resume_resource(enum ipa_rm_resource_name name);
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa3_tag_aggr_force_close(int pipe_num);
+
+void ipa3_active_clients_unlock(void);
+int ipa3_wdi_init(void);
+int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
+		    unsigned long timeout);
+
+void ipa3_q6_pre_shutdown_cleanup(void);
+void ipa3_q6_post_shutdown_cleanup(void);
+void ipa3_q6_pre_powerup_cleanup(void);
+void ipa3_update_ssr_state(bool is_ssr);
+int ipa3_init_q6_smem(void);
+
+int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info);
+
+int ipa3_uc_interface_init(void);
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client);
+int ipa3_uc_state_check(void);
+int ipa3_uc_loaded_check(void);
+int ipa3_uc_register_ready_cb(struct notifier_block *nb);
+int ipa3_uc_unregister_ready_cb(struct notifier_block *nb);
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies);
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs);
+int ipa3_uc_notify_clk_state(bool enabled);
+int ipa3_dma_setup(void);
+void ipa3_dma_shutdown(void);
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+		enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa3_uc_update_hw_flags(u32 flags);
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa3_uc_mhi_cleanup(void);
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx);
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection);
+int ipa3_uc_mhi_reset_channel(int channelHandle);
+int ipa3_uc_mhi_suspend_channel(int channelHandle);
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n);
+int ipa3_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
+int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
+int ipa3_uc_quota_monitor(uint64_t quota);
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa3_uc_setup_event_ring(void);
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
+void ipa3_tag_destroy_imm(void *user1, int user2);
+const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(
+	enum ipa_client_type client);
+
+int ipa3_wigig_init_i(void);
+int ipa3_wigig_internal_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa);
+
+/* Hardware stats */
+
+#define IPA_STATS_MAX_PIPE_BIT 32
+
+struct ipa_teth_stats_endpoints {
+	u32 prod_mask;
+	u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT];
+};
+
+int ipa_hw_stats_init(void);
+
+int ipa_init_flt_rt_stats(void);
+
+int ipa_debugfs_init_stats(struct dentry *parent);
+
+int ipa_init_quota_stats(u32 pipe_bitmask);
+
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out);
+
+int ipa_reset_quota_stats(enum ipa_client_type client);
+
+int ipa_reset_all_quota_stats(void);
+
+int ipa_init_drop_stats(u32 pipe_bitmask);
+
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out);
+
+int ipa_reset_drop_stats(enum ipa_client_type client);
+
+int ipa_reset_all_drop_stats(void);
+
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in);
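+
+/*
+ * Example (illustrative only; the pipe indices are hypothetical, and
+ * indexing dst_ep_mask by producer pipe reflects this editor's reading of
+ * the stats code): enable tethering stats from one producer pipe toward
+ * two consumer pipes.
+ *
+ *	struct ipa_teth_stats_endpoints eps = { 0 };
+ *
+ *	eps.prod_mask = BIT(wan_prod_idx);
+ *	eps.dst_ep_mask[wan_prod_idx] = BIT(usb_cons_idx) |
+ *					BIT(wlan_cons_idx);
+ *	ret = ipa_init_teth_stats(&eps);
+ */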
+
+int ipa_get_teth_stats(void);
+
+int ipa_query_teth_stats(enum ipa_client_type prod,
+	struct ipa_quota_stats_all *out, bool reset);
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons);
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod);
+
+int ipa_reset_all_teth_stats(void);
+
+int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query);
+
+int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats);
+
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info);
+
+u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+struct iommu_domain *ipa3_get_smmu_domain_by_type(
+	enum ipa_smmu_cb_type cb_type);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+	phys_addr_t paddr, size_t size, int prot);
+int ipa3_ap_suspend(struct device *dev);
+int ipa3_ap_resume(struct device *dev);
+int ipa3_init_interrupts(void);
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
+void ipa3_set_resorce_groups_min_max_limits(void);
+int ipa3_suspend_apps_pipes(bool suspend);
+void ipa3_force_close_coal(void);
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_flt_rule_entry entry[],
+	int *num_entry);
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_rt_rule_entry entry[],
+	int *num_entry);
+int ipa3_restore_suspend_handler(void);
+int ipa3_inject_dma_task_for_gsi(void);
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr);
+void ipa3_inc_acquire_wakelock(void);
+void ipa3_dec_release_wakelock(void);
+int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base,
+	enum gsi_ver);
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size,
+	enum gsi_ver);
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
+int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+int ipa3_lan_rx_poll(u32 clnt_hdl, int weight);
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map,
+	enum ipa_smmu_cb_type cb_type);
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
+	enum ipa_smmu_cb_type cb_type);
+void ipa3_reset_freeze_vote(void);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
+struct dentry *ipa_debugfs_get_root(void);
+struct device *ipa3_get_pdev(void);
+void ipa3_enable_dcd(void);
+void ipa3_disable_prefetch(enum ipa_client_type client);
+int ipa3_alloc_common_event_ring(void);
+int ipa3_allocate_dma_task_for_gsi(void);
+void ipa3_free_dma_task_for_gsi(void);
+int ipa3_allocate_coal_close_frame(void);
+void ipa3_free_coal_close_frame(void);
+int ipa3_set_clock_plan_from_pm(int idx);
+void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
+int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
+void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
+	struct ipahal_imm_cmd_pyld *cmd_pyld);
+int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+uint ipa3_get_emulation_type(void);
+int ipa3_get_transport_info(
+	phys_addr_t *phys_addr_ptr,
+	unsigned long *size_ptr);
+irq_handler_t ipa3_get_isr(void);
+void ipa_pc_qmp_enable(void);
+u32 ipa3_get_r_rev_version(void);
+#if defined(CONFIG_IPA3_REGDUMP)
+int ipa_reg_save_init(u32 value);
+void ipa_save_registers(void);
+void ipa_save_gsi_ver(void);
+#else
+static inline int ipa_reg_save_init(u32 value) { return 0; }
+static inline void ipa_save_registers(void) { }
+static inline void ipa_save_gsi_ver(void) { }
+#endif
+
+#ifdef CONFIG_IPA_ETH
+int ipa_eth_init(void);
+void ipa_eth_exit(void);
+#else
+static inline int ipa_eth_init(void) { return 0; }
+static inline void ipa_eth_exit(void) { }
+#endif /* CONFIG_IPA_ETH */
+
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl);
+
+#if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot prot);
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state);
+int ipa3_is_mhip_offload_enabled(void);
+int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe);
+int ipa_mpm_panic_handler(char *buf, int size);
+int ipa3_mpm_enable_adpl_over_odl(bool enable);
+int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+#else /* IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER) */
+static inline int ipa_mpm_mhip_xdci_pipe_enable(
+	enum ipa_usb_teth_prot prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_mhip_xdci_pipe_disable(
+	enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_notify_wan_state(
+	struct wan_ioctl_notify_wan_state *state)
+{
+	return 0;
+}
+static inline int ipa3_is_mhip_offload_enabled(void)
+{
+	return 0;
+}
+static inline int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	return 0;
+}
+static inline int ipa_mpm_panic_handler(char *buf, int size)
+{
+	return 0;
+}
+
+static inline int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	return 0;
+}
+
+static inline int ipa3_mpm_enable_adpl_over_odl(bool enable)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER) */
+
+static inline void *alloc_and_init(u32 size, u32 init_val)
+{
+	void *ptr = kmalloc(size, GFP_KERNEL);
+
+	if (ptr)
+		memset(ptr, init_val, size);
+
+	return ptr;
+}
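+
+/*
+ * Note (editor's sketch): with init_val == 0 this is equivalent to
+ * kzalloc(size, GFP_KERNEL); a non-zero init_val gives a pattern fill,
+ * e.g. (the table below is hypothetical):
+ *
+ *	tbl = alloc_and_init(tbl_size, 0xFF);
+ */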
+
+/* query IPA APQ mode */
+bool ipa3_is_apq(void);
+/* check if ODL is connected */
+bool ipa3_is_odl_connected(void);
+
+int ipa3_uc_send_enable_flow_control(uint16_t gsi_chid,
+	uint16_t redMarkerThreshold);
+int ipa3_uc_send_disable_flow_control(void);
+int ipa3_uc_send_update_flow_control(uint32_t bitmask,
+	uint8_t  add_delete);
+#endif /* _IPA3_I_H_ */

+ 612 - 0
ipa/ipa_v3/ipa_interrupts.c

@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa3_interrupt_info {
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	bool deferred_flag;
+};
+
+struct ipa3_interrupt_work_wrap {
+	struct work_struct interrupt_work;
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	void *interrupt_data;
+};
+
+static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa3_tx_suspend_interrupt_wa(void);
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
+static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
+						ipa3_enable_tx_suspend_wa);
+static spinlock_t suspend_wa_lock;
+static void ipa3_process_interrupts(bool isr_context);
+
+static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
+	[IPA_BAD_SNOC_ACCESS_IRQ]		= 0,
+	[IPA_UC_IRQ_0]				= 2,
+	[IPA_UC_IRQ_1]				= 3,
+	[IPA_UC_IRQ_2]				= 4,
+	[IPA_UC_IRQ_3]				= 5,
+	[IPA_UC_IN_Q_NOT_EMPTY_IRQ]		= 6,
+	[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ]		= 7,
+	[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ]	= 8,
+	[IPA_RX_ERR_IRQ]			= 9,
+	[IPA_DEAGGR_ERR_IRQ]			= 10,
+	[IPA_TX_ERR_IRQ]			= 11,
+	[IPA_STEP_MODE_IRQ]			= 12,
+	[IPA_PROC_ERR_IRQ]			= 13,
+	[IPA_TX_SUSPEND_IRQ]			= 14,
+	[IPA_TX_HOLB_DROP_IRQ]			= 15,
+	[IPA_BAM_GSI_IDLE_IRQ]			= 16,
+	[IPA_PIPE_YELLOW_MARKER_BELOW_IRQ]	= 17,
+	[IPA_PIPE_RED_MARKER_BELOW_IRQ]		= 18,
+	[IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ]	= 19,
+	[IPA_PIPE_RED_MARKER_ABOVE_IRQ]		= 20,
+	[IPA_UCP_IRQ]				= 21,
+	[IPA_DCMP_IRQ]				= 22,
+	[IPA_GSI_EE_IRQ]			= 23,
+	[IPA_GSI_IPA_IF_TLV_RCVD_IRQ]		= 24,
+	[IPA_GSI_UC_IRQ]			= 25,
+	[IPA_TLV_LEN_MIN_DSM_IRQ]		= 26,
+	[IPA_DRBIP_PKT_EXCEED_MAX_SIZE_IRQ]	= 27,
+	[IPA_DRBIP_DATA_SCTR_CFG_ERROR_IRQ]	= 28,
+	[IPA_DRBIP_IMM_CMD_NO_FLSH_HZRD_IRQ]	= 29,
+};
+
+static void ipa3_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
+
+static void ipa3_deferred_interrupt_work(struct work_struct *work)
+{
+	struct ipa3_interrupt_work_wrap *work_data =
+			container_of(work,
+			struct ipa3_interrupt_work_wrap,
+			interrupt_work);
+	IPADBG("call handler from workq for interrupt %d...\n",
+		work_data->interrupt);
+	work_data->handler(work_data->interrupt, work_data->private_data,
+			work_data->interrupt_data);
+	kfree(work_data->interrupt_data);
+	kfree(work_data);
+}
+
+static bool ipa3_is_valid_ep(u32 ep_suspend_data)
+{
+	u32 bmsk = 1;
+	u32 i = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
+			return true;
+		bmsk = bmsk << 1;
+	}
+	return false;
+}
+
+static int ipa3_handle_interrupt(int irq_num, bool isr_context)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	u32 suspend_data;
+	void *interrupt_data = NULL;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+	int res;
+
+	interrupt_info = ipa_interrupt_to_cb[irq_num];
+	if (interrupt_info.handler == NULL) {
+		IPAERR("A callback function wasn't set for interrupt num %d\n",
+			irq_num);
+		return -EINVAL;
+	}
+
+	switch (interrupt_info.interrupt) {
+	case IPA_TX_SUSPEND_IRQ:
+		IPADBG_LOW("processing TX_SUSPEND interrupt\n");
+		ipa3_tx_suspend_interrupt_wa();
+		suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
+			ipa_ee);
+		IPADBG_LOW("get interrupt %d\n", suspend_data);
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+			/* Clearing L2 interrupts status */
+			ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
+				ipa_ee, suspend_data);
+		}
+		if (!ipa3_is_valid_ep(suspend_data))
+			return 0;
+
+		suspend_interrupt_data =
+			kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return -ENOMEM;
+		}
+		suspend_interrupt_data->endpoints = suspend_data;
+		interrupt_data = suspend_interrupt_data;
+		break;
+	default:
+		break;
+	}
+
+	/* Force defer processing if in ISR context. */
+	if (interrupt_info.deferred_flag || isr_context) {
+		IPADBG_LOW("Defer handling interrupt %d\n",
+			interrupt_info.interrupt);
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			res = -ENOMEM;
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = interrupt_info.interrupt;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+	} else {
+		IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt);
+		interrupt_info.handler(interrupt_info.interrupt,
+			interrupt_info.private_data,
+			interrupt_data);
+		kfree(interrupt_data);
+	}
+
+	return 0;
+
+fail_alloc_work:
+	kfree(interrupt_data);
+	return res;
+}
+
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
+{
+	u32 en;
+	u32 suspend_bmask;
+	int irq_num;
+
+	IPADBG_LOW("Enter\n");
+
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+
+	if (irq_num == -1) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* make sure IPA HW is clocked on */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	/* enable TX_SUSPEND_IRQ */
+	en |= suspend_bmask;
+	IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n",
+		en);
+	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, en);
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_tx_suspend_interrupt_wa(void)
+{
+	u32 val;
+	u32 suspend_bmask;
+	int irq_num;
+	int wa_delay;
+
+	IPADBG_LOW("Enter\n");
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+
+	if (irq_num == -1) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* disable TX_SUSPEND_IRQ */
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	val &= ~suspend_bmask;
+	IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
+		val);
+	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
+
+	wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT;
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+	    ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		wa_delay *= 400;
+	}
+
+	IPADBG_LOW("Delay period %d msec\n", wa_delay);
+
+	queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
+			msecs_to_jiffies(wa_delay));
+
+	IPADBG_LOW("Exit\n");
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+	if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+		ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+		return true;
+	else
+		return false;
+}
+
+static void ipa3_process_interrupts(bool isr_context)
+{
+	u32 reg;
+	u32 bmsk;
+	u32 i = 0;
+	u32 en;
+	unsigned long flags;
+	bool uc_irq;
+
+	IPADBG_LOW("Enter isr_context=%d\n", isr_context);
+
+	spin_lock_irqsave(&suspend_wa_lock, flags);
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+	while (en & reg) {
+		IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg);
+		bmsk = 1;
+		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+			IPADBG_LOW("Check irq number %d\n", i);
+			if (en & reg & bmsk) {
+				IPADBG_LOW("Irq number %d asserted\n", i);
+				uc_irq = is_uc_irq(i);
+
+				/*
+				 * Clear uC interrupt before processing to avoid
+				 * clearing unhandled interrupts
+				 */
+				if (uc_irq)
+					ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+
+				/*
+				 * handle the interrupt with spin_lock
+				 * unlocked to avoid calling client in atomic
+				 * context. mutual exclusion still preserved
+				 * as the read/clr is done with spin_lock
+				 * locked.
+				 */
+				spin_unlock_irqrestore(&suspend_wa_lock, flags);
+				ipa3_handle_interrupt(i, isr_context);
+				spin_lock_irqsave(&suspend_wa_lock, flags);
+
+				/*
+				 * Clear non uC interrupt after processing
+				 * to avoid clearing interrupt data
+				 */
+				if (!uc_irq)
+					ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+			}
+			bmsk = bmsk << 1;
+		}
+
+		reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+		/* due to the suspend interrupt HW bug we must re-read the
+		 * EN register here, otherwise the while loop would never end
+		 */
+		en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	}
+
+	spin_unlock_irqrestore(&suspend_wa_lock, flags);
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_interrupt_defer(struct work_struct *work)
+{
+	IPADBG("processing interrupts in wq\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("Done\n");
+}
+
+static irqreturn_t ipa3_isr(int irq, void *ctxt)
+{
+	struct ipa_active_client_logging_info log_info;
+
+	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+	IPADBG_LOW("Enter\n");
+	/* defer interrupt handling in case IPA is not clocked on */
+	if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+		return IRQ_HANDLED;
+	}
+
+	ipa3_process_interrupts(true);
+	IPADBG_LOW("Exit\n");
+
+	ipa3_dec_client_disable_clks_no_block(&log_info);
+	return IRQ_HANDLED;
+}
+
+irq_handler_t ipa3_get_isr(void)
+{
+	return ipa3_isr;
+}
+
+/**
+ * ipa3_add_interrupt_handler() - Adds handler to an interrupt type
+ * @interrupt:		Interrupt type
+ * @handler:		The handler to be added
+ * @deferred_flag:	whether the handler processing should be deferred in
+ *			a workqueue
+ * @private_data:	the client's private data
+ *
+ * Adds a handler for an interrupt type and enables the corresponding bit
+ * in the IRQ_EN register; the associated interrupt will then be reported
+ * in the IRQ_STTS register
+ */
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+	int client_idx, ep_idx;
+
+	IPADBG("interrupt_enum(%d)\n", interrupt);
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);
+
+	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+	ipa_interrupt_to_cb[irq_num].handler = handler;
+	ipa_interrupt_to_cb[irq_num].private_data = private_data;
+	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+	bmsk = 1 << irq_num;
+	val |= bmsk;
+	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
+	IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+
+	/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		val = ~0;
+		for (client_idx = 0; client_idx < IPA_CLIENT_MAX;
+			client_idx++) {
+			if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
+				IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+				ep_idx = ipa3_get_ep_mapping(client_idx);
+				IPADBG("modem ep_idx(%d) client_idx = %d\n",
+					ep_idx, client_idx);
+				if (ep_idx == -1)
+					IPADBG("Invalid IPA client\n");
+				else
+					val &= ~(1 << ep_idx);
+			}
+		}
+
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
+	}
+	return 0;
+}
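+
+/*
+ * Example (illustrative only, not part of the original driver): a client
+ * registering a deferred handler for TX_SUSPEND interrupts. The handler
+ * name is hypothetical; the payload type matches the
+ * ipa_tx_suspend_irq_data passed by ipa3_handle_interrupt().
+ *
+ *	static void my_suspend_cb(enum ipa_irq_type interrupt,
+ *		void *private_data, void *interrupt_data)
+ *	{
+ *		struct ipa_tx_suspend_irq_data *d = interrupt_data;
+ *
+ *		pr_info("suspended endpoints bitmap 0x%x\n", d->endpoints);
+ *	}
+ *
+ *	ret = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ *		my_suspend_cb, true, NULL);
+ */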
+
+/**
+ * ipa3_remove_interrupt_handler() - Removes the handler for an interrupt type
+ * @interrupt:		Interrupt type
+ *
+ * Removes the handler and disables the specific bit in the IRQ_EN register
+ */
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	kfree(ipa_interrupt_to_cb[irq_num].private_data);
+	ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+	ipa_interrupt_to_cb[irq_num].handler = NULL;
+	ipa_interrupt_to_cb[irq_num].private_data = NULL;
+	ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+	/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
+	}
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	bmsk = 1 << irq_num;
+	val &= ~bmsk;
+	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	return 0;
+}
+
+/**
+ * ipa3_interrupts_init() - Initialize the IPA interrupts framework
+ * @ipa_irq:	The interrupt number to allocate
+ * @ee:		Execution environment
+ * @ipa_dev:	The basic device structure representing the IPA driver
+ *
+ * - Initialize the ipa_interrupt_to_cb array
+ * - Clear interrupts status
+ * - Register the ipa interrupt handler - ipa3_isr
+ * - Enable apps processor wakeup by IPA interrupts
+ */
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+	int idx;
+	int res = 0;
+
+	ipa_ee = ee;
+	for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+		ipa_interrupt_to_cb[idx].deferred_flag = false;
+		ipa_interrupt_to_cb[idx].handler = NULL;
+		ipa_interrupt_to_cb[idx].private_data = NULL;
+		ipa_interrupt_to_cb[idx].interrupt = -1;
+	}
+
+	ipa_interrupt_wq = create_singlethread_workqueue(
+			INTERRUPT_WORKQUEUE_NAME);
+	if (!ipa_interrupt_wq) {
+		IPAERR("workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * NOTE:
+	 *
+	 *  We'll only register an isr on non-emulator (i.e. real UE)
+	 *  systems.
+	 *
+	 *  On the emulator, emulator_soft_irq_isr() will be calling
+	 *  ipa3_isr, so hence, no isr registration here, and instead,
+	 *  we'll pass the address of ipa3_isr to the gsi layer where
+	 *  emulator interrupts are handled...
+	 */
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+					IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+		if (res) {
+			IPAERR(
+			    "fail to register IPA IRQ handler irq=%d\n",
+			    ipa_irq);
+			destroy_workqueue(ipa_interrupt_wq);
+			ipa_interrupt_wq = NULL;
+			return -ENODEV;
+		}
+		IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+		res = enable_irq_wake(ipa_irq);
+		if (res)
+			IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				   ipa_irq, res);
+		else
+			IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+	}
+	spin_lock_init(&suspend_wa_lock);
+	return 0;
+}
+
+/**
+ * ipa3_interrupts_destroy() - Destroy the IPA interrupts framework
+ * @ipa_irq:	The interrupt number to allocate
+ * @ee:		Execution environment
+ * @ipa_dev:	The basic device structure representing the IPA driver
+ *
+ * - Disable apps processor wakeup by IPA interrupts
+ * - Unregister the ipa interrupt handler - ipa3_isr
+ * - Destroy the interrupt workqueue
+ */
+void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev)
+{
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
+		disable_irq_wake(ipa_irq);
+		free_irq(ipa_irq, ipa_dev);
+	}
+	destroy_workqueue(ipa_interrupt_wq);
+	ipa_interrupt_wq = NULL;
+}
+
+/**
+ * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
+ * @clnt_hdl:		suspended client handle, IRQ is emulated for this pipe
+ *
+ *  Emulate the suspend IRQ in order to unsuspend a client that was suspended
+ *  with an open aggregation frame, working around a HW bug where no IRQ is
+ *  generated when an endpoint is suspended during an open aggregation.
+ */
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
+	int irq_num;
+	int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+
+	if (aggr_active_bitmap & (1 << clnt_hdl)) {
+		/* force close aggregation */
+		ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+		/* simulate suspend IRQ */
+		irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+		interrupt_info = ipa_interrupt_to_cb[irq_num];
+		if (interrupt_info.handler == NULL) {
+			IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n");
+			return;
+		}
+		suspend_interrupt_data = kzalloc(
+				sizeof(*suspend_interrupt_data),
+				GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return;
+		}
+		suspend_interrupt_data->endpoints = 1 << clnt_hdl;
+
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = IPA_TX_SUSPEND_IRQ;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = (void *)suspend_interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+		return;
+fail_alloc_work:
+		kfree(suspend_interrupt_data);
+	}
+}

+ 810 - 0
ipa/ipa_v3/ipa_intf.c

@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include "ipa_i.h"
+#include <linux/msm_ipa.h>
+
+struct ipa3_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct list_head link;
+	u32 num_tx_props;
+	u32 num_rx_props;
+	u32 num_ext_props;
+	struct ipa_ioc_tx_intf_prop *tx;
+	struct ipa_ioc_rx_intf_prop *rx;
+	struct ipa_ioc_ext_intf_prop *ext;
+	enum ipa_client_type excp_pipe;
+};
+
+struct ipa3_push_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_free_fn callback;
+	void *buff;
+	struct list_head link;
+};
+
+struct ipa3_pull_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_pull_fn callback;
+	struct list_head link;
+};
+
+/**
+ * ipa3_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	return ipa3_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa3_register_intf_ext() - register "logical" interface along with its
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties; this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext)
+{
+	struct ipa3_intf *intf;
+	u32 len;
+
+	if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+		IPAERR_RL("invalid params name=%pK tx=%pK rx=%pK ext=%pK\n",
+				name, tx, rx, ext);
+		return -EINVAL;
+	}
+
+	if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR_RL("invalid tx num_props=%d max=%d\n", tx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR_RL("invalid rx num_props=%d max=%d\n", rx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR_RL("invalid ext num_props=%d max=%d\n", ext->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	len = sizeof(struct ipa3_intf);
+	intf = kzalloc(len, GFP_KERNEL);
+	if (intf == NULL)
+		return -ENOMEM;
+
+	strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+	if (tx) {
+		intf->num_tx_props = tx->num_props;
+		len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+		intf->tx = kmemdup(tx->prop, len, GFP_KERNEL);
+		if (intf->tx == NULL) {
+			kfree(intf);
+			return -ENOMEM;
+		}
+	}
+
+	if (rx) {
+		intf->num_rx_props = rx->num_props;
+		len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+		intf->rx = kmemdup(rx->prop, len, GFP_KERNEL);
+		if (intf->rx == NULL) {
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+	}
+
+	if (ext) {
+		intf->num_ext_props = ext->num_props;
+		len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+		intf->ext = kmemdup(ext->prop, len, GFP_KERNEL);
+		if (intf->ext == NULL) {
+			kfree(intf->rx);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+	}
+
+	if (ext && ext->excp_pipe_valid)
+		intf->excp_pipe = ext->excp_pipe;
+	else
+		intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_add_tail(&intf->link, &ipa3_ctx->intf_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
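+
+/*
+ * Example (illustrative only; the interface name, client pipe and
+ * property values are hypothetical): registering an interface with a
+ * single IPv4 tx property so rules can be configured for it from
+ * user-space.
+ *
+ *	static struct ipa_ioc_tx_intf_prop tx_prop = {
+ *		.ip = IPA_IP_v4,
+ *		.dst_pipe = IPA_CLIENT_USB_CONS,
+ *	};
+ *	struct ipa_tx_intf tx = {
+ *		.num_props = 1,
+ *		.prop = &tx_prop,
+ *	};
+ *
+ *	ret = ipa3_register_intf("rndis0", &tx, NULL);
+ */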
+
+/**
+ * ipa3_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_intf(const char *name)
+{
+	struct ipa3_intf *entry;
+	struct ipa3_intf *next;
+	int result = -EINVAL;
+
+	if ((name == NULL) ||
+	    (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) {
+		IPAERR_RL("invalid param name=%s\n", name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, name)) {
+			list_del(&entry->link);
+			kfree(entry->ext);
+			kfree(entry->rx);
+			kfree(entry->tx);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf() - query logical interface properties
+ * @lookup:	[inout] interface name and number of properties
+ *
+ * Obtain the number of tx, rx and ext properties for the named
+ * interface, used as part of querying the tx and rx properties for
+ * configuration of various rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (lookup == NULL) {
+		IPAERR_RL("invalid param lookup=%pK\n", lookup);
+		return result;
+	}
+
+	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
+			IPA_RESOURCE_NAME_MAX) {
+		IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, lookup->name)) {
+			lookup->num_tx_props = entry->num_tx_props;
+			lookup->num_rx_props = entry->num_rx_props;
+			lookup->num_ext_props = entry->num_ext_props;
+			lookup->excp_pipe = entry->excp_pipe;
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_tx_props() - query TX props of an interface
+ * @tx:  [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (tx == NULL) {
+		IPAERR_RL("null args: tx\n");
+		return result;
+	}
+
+	tx->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR_RL("Interface name too long. (%s)\n", tx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, tx->name)) {
+			/* verify the caller's expected property count */
+			if (entry->num_tx_props != tx->num_tx_props) {
+				IPAERR("invalid entry number(%u %u)\n",
+					entry->num_tx_props,
+						tx->num_tx_props);
+				mutex_unlock(&ipa3_ctx->lock);
+				return result;
+			}
+			memcpy(tx->tx, entry->tx, entry->num_tx_props *
+			       sizeof(struct ipa_ioc_tx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_rx_props() - query RX props of an interface
+ * @rx:  [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (rx == NULL) {
+		IPAERR_RL("null args: rx\n");
+		return result;
+	}
+
+	rx->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR_RL("Interface name too long. (%s)\n", rx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, rx->name)) {
+			/* verify the caller's expected property count */
+			if (entry->num_rx_props != rx->num_rx_props) {
+				IPAERR("invalid entry number(%u %u)\n",
+					entry->num_rx_props,
+						rx->num_rx_props);
+				mutex_unlock(&ipa3_ctx->lock);
+				return result;
+			}
+			memcpy(rx->rx, entry->rx, entry->num_rx_props *
+					sizeof(struct ipa_ioc_rx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_ext_props() - query EXT props of an interface
+ * @ext:  [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (ext == NULL) {
+		IPAERR_RL("invalid param ext=%pK\n", ext);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, ext->name)) {
+			/* verify the caller's expected property count */
+			if (entry->num_ext_props != ext->num_ext_props) {
+				IPAERR("invalid entry number(%u %u)\n",
+					entry->num_ext_props,
+						ext->num_ext_props);
+				mutex_unlock(&ipa3_ctx->lock);
+				return result;
+			}
+			memcpy(ext->ext, entry->ext, entry->num_ext_props *
+					sizeof(struct ipa_ioc_ext_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
+{
+	struct ipa3_push_msg *msg_dup;
+	struct ipa_wlan_msg_ex *event_ex_cur_con = NULL;
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa_wlan_msg *event_ex_cur_discon = NULL;
+	void *data_dup = NULL;
+	struct ipa3_push_msg *entry;
+	struct ipa3_push_msg *next;
+	int cnt = 0, total = 0, max = 0;
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	uint8_t mac2[IPA_MAC_ADDR_SIZE];
+
+	if (!buff)
+		return -EINVAL;
+	if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
+		/* debug print */
+		event_ex_cur_con = buff;
+		for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) {
+			if (event_ex_cur_con->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n",
+				event_ex_cur_con->attribs[cnt].u.mac_addr[0],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[1],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[2],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[3],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[4],
+				event_ex_cur_con->attribs[cnt].u.mac_addr[5],
+				meta->msg_type);
+			}
+		}
+
+		mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+		msg_dup = kzalloc(sizeof(*msg_dup), GFP_KERNEL);
+		if (msg_dup == NULL) {
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		msg_dup->meta = *meta;
+		if (meta->msg_len > 0 && buff) {
+			data_dup = kmemdup(buff, meta->msg_len, GFP_KERNEL);
+			if (data_dup == NULL) {
+				kfree(msg_dup);
+				mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+				return -ENOMEM;
+			}
+			msg_dup->buff = data_dup;
+			msg_dup->callback = ipa3_send_msg_free;
+		} else {
+			IPAERR("msg_len %d\n", meta->msg_len);
+			kfree(msg_dup);
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list);
+		mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	}
+
+	/* remove the cache */
+	if (meta->msg_type == WLAN_CLIENT_DISCONNECT) {
+		/* debug print */
+		event_ex_cur_discon = buff;
+		IPADBG("Mac %pM, msg %d\n",
+		event_ex_cur_discon->mac_addr,
+		meta->msg_type);
+		memcpy(mac2,
+			event_ex_cur_discon->mac_addr,
+			sizeof(mac2));
+
+		mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+		list_for_each_entry_safe(entry, next,
+				&ipa3_ctx->msg_wlan_client_list,
+				link) {
+			event_ex_list = entry->buff;
+			max = event_ex_list->num_of_attribs;
+			for (cnt = 0; cnt < max; cnt++) {
+				memcpy(mac,
+					event_ex_list->attribs[cnt].u.mac_addr,
+					sizeof(mac));
+				if (event_ex_list->attribs[cnt].attrib_type ==
+					WLAN_HDR_ATTRIB_MAC_ADDR) {
+					pr_debug("%pM\n", mac);
+
+					/* compare to delete one*/
+					if (memcmp(mac2, mac,
+						sizeof(mac)) == 0) {
+						IPADBG("clean %d\n", total);
+						list_del(&entry->link);
+						kfree(entry);
+						break;
+					}
+				}
+			}
+			total++;
+		}
+		mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	}
+	return 0;
+}
+
+/**
+ * ipa3_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	struct ipa3_push_msg *msg;
+	void *data = NULL;
+
+	if (meta == NULL || (buff == NULL && callback != NULL) ||
+	    (buff != NULL && callback == NULL)) {
+		IPAERR_RL("invalid param meta=%pK buff=%pK, callback=%pK\n",
+		       meta, buff, callback);
+		return -EINVAL;
+	}
+
+	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+		IPAERR_RL("unsupported message type %d\n", meta->msg_type);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+	if (msg == NULL)
+		return -ENOMEM;
+
+	msg->meta = *meta;
+	if (meta->msg_len > 0 && buff) {
+		data = kmemdup(buff, meta->msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			kfree(msg);
+			return -ENOMEM;
+		}
+		msg->buff = data;
+		msg->callback = ipa3_send_msg_free;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+	/* support for softap client event cache */
+	if (wlan_msg_process(meta, buff))
+		IPAERR_RL("wlan_msg_process failed\n");
+
+	/* unlock only after process */
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
+
+	wake_up(&ipa3_ctx->msg_waitq);
+	if (buff)
+		callback(buff, meta->msg_len, meta->msg_type);
+
+	return 0;
+}
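+
+/*
+ * Example (illustrative only; the event type, payload struct and free
+ * callback are hypothetical choices): publishing an event for user-space
+ * readers of /dev/ipa. The driver kmemdup()s the payload internally, so
+ * the callback only needs to free the caller's copy.
+ *
+ *	static void my_msg_free(void *buff, u32 len, u32 type)
+ *	{
+ *		kfree(buff);
+ *	}
+ *
+ *	struct ipa_wlan_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ *	struct ipa_msg_meta meta = { 0 };
+ *
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	meta.msg_type = WLAN_CLIENT_CONNECT;
+ *	meta.msg_len = sizeof(*msg);
+ *	strlcpy(msg->name, "wlan0", IPA_RESOURCE_NAME_MAX);
+ *	ret = ipa3_send_msg(&meta, msg, my_msg_free);
+ */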
+
+/**
+ * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM
+ *
+ * Resend cached WLAN client connect events to user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_resend_wlan_msg(void)
+{
+	struct ipa_wlan_msg_ex *event_ex_list = NULL;
+	struct ipa3_push_msg *entry;
+	struct ipa3_push_msg *next;
+	int cnt = 0, total = 0;
+	struct ipa3_push_msg *msg;
+	void *data = NULL;
+
+	IPADBG("\n");
+
+	mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list,
+			link) {
+
+		event_ex_list = entry->buff;
+		for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) {
+			if (event_ex_list->attribs[cnt].attrib_type ==
+				WLAN_HDR_ATTRIB_MAC_ADDR) {
+				IPADBG("%d-Mac %pM\n", total,
+				event_ex_list->attribs[cnt].u.mac_addr);
+			}
+		}
+
+		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+		if (msg == NULL) {
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		msg->meta = entry->meta;
+		data = kmemdup(entry->buff, entry->meta.msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			kfree(msg);
+			mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+			return -ENOMEM;
+		}
+		msg->buff = data;
+		msg->callback = ipa3_send_msg_free;
+		mutex_lock(&ipa3_ctx->msg_lock);
+		list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+		mutex_unlock(&ipa3_ctx->msg_lock);
+		wake_up(&ipa3_ctx->msg_waitq);
+
+		total++;
+	}
+	mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
+	return 0;
+}
+
+/**
+ * ipa3_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register a message callback with the IPA driver so that the IPA driver can
+ * pull the message from the kernel client on demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	struct ipa3_pull_msg *msg;
+
+	if (meta == NULL || callback == NULL) {
+		IPAERR_RL("invalid param meta=%pK callback=%pK\n",
+				meta, callback);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL);
+	if (msg == NULL)
+		return -ENOMEM;
+
+	msg->meta = *meta;
+	msg->callback = callback;
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list);
+	mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	struct ipa3_pull_msg *entry;
+	struct ipa3_pull_msg *next;
+	int result = -EINVAL;
+
+	if (meta == NULL) {
+		IPAERR_RL("null arg: meta\n");
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			list_del(&entry->link);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
+
+/**
+ * ipa3_read() - read message from IPA device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continually read from /dev/ipa; the read will block when
+ * there are no messages to read. Upon return, user-space should read the
+ * ipa_msg_meta from the start of the buffer to learn what type of message was
+ * read and its length in the remainder of the buffer. The buffer supplied
+ * must be big enough to hold the message meta-data and the largest defined
+ * message type
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		  loff_t *f_pos)
+{
+	char __user *start;
+	struct ipa3_push_msg *msg = NULL;
+	int ret;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int locked;
+
+	start = buf;
+
+	add_wait_queue(&ipa3_ctx->msg_waitq, &wait);
+	while (1) {
+		mutex_lock(&ipa3_ctx->msg_lock);
+		locked = 1;
+
+		if (!list_empty(&ipa3_ctx->msg_list)) {
+			msg = list_first_entry(&ipa3_ctx->msg_list,
+					struct ipa3_push_msg, link);
+			list_del(&msg->link);
+		}
+
+		IPADBG_LOW("msg=%pK\n", msg);
+
+		if (msg) {
+			locked = 0;
+			mutex_unlock(&ipa3_ctx->msg_lock);
+			if (copy_to_user(buf, &msg->meta,
+					  sizeof(struct ipa_msg_meta))) {
+				ret = -EFAULT;
+				kfree(msg);
+				msg = NULL;
+				break;
+			}
+			buf += sizeof(struct ipa_msg_meta);
+			count -= sizeof(struct ipa_msg_meta);
+			if (msg->buff) {
+				if (copy_to_user(buf, msg->buff,
+						  msg->meta.msg_len)) {
+					ret = -EFAULT;
+					kfree(msg);
+					msg = NULL;
+					break;
+				}
+				buf += msg->meta.msg_len;
+				count -= msg->meta.msg_len;
+				msg->callback(msg->buff, msg->meta.msg_len,
+					       msg->meta.msg_type);
+			}
+			IPA_STATS_INC_CNT(
+				ipa3_ctx->stats.msg_r[msg->meta.msg_type]);
+			kfree(msg);
+			msg = NULL;
+		}
+
+		ret = -EAGAIN;
+		if (filp->f_flags & O_NONBLOCK)
+			break;
+
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
+
+		if (start != buf)
+			break;
+
+		locked = 0;
+		mutex_unlock(&ipa3_ctx->msg_lock);
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+	}
+
+	remove_wait_queue(&ipa3_ctx->msg_waitq, &wait);
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+
+	if (locked)
+		mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return ret;
+}
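+
+/*
+ * Illustrative user-space read loop (not part of the driver). It shows the
+ * contract described above: one read() returns one meta-data header followed
+ * by the message payload. IPA_MSG_MAX_LEN and handle_msg() are hypothetical
+ * names chosen for this sketch.
+ *
+ *	char buf[sizeof(struct ipa_msg_meta) + IPA_MSG_MAX_LEN];
+ *	struct ipa_msg_meta meta;
+ *	ssize_t n;
+ *
+ *	n = read(fd, buf, sizeof(buf));
+ *	if (n >= (ssize_t)sizeof(meta)) {
+ *		memcpy(&meta, buf, sizeof(meta));
+ *		handle_msg(meta.msg_type, buf + sizeof(meta), meta.msg_len);
+ *	}
+ */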
+
+/**
+ * ipa3_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buf:  [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populates the supplied buffer with the pull message, which is fetched
+ * from the client; the message type must have previously been registered
+ * with the IPA driver.
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+	struct ipa3_pull_msg *entry;
+	int result = -EINVAL;
+
+	if (meta == NULL || buff == NULL || !count) {
+		IPAERR_RL("invalid param name=%pK buff=%pK count=%zu\n",
+				meta, buff, count);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			result = entry->callback(buff, count, meta->msg_type);
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
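+
+/*
+ * Minimal sketch of the pull-message flow. my_pull_cb is a hypothetical
+ * callback whose signature mirrors the call site above, and the registration
+ * helper is assumed to be the ipa3_register_pull_msg() counterpart of
+ * ipa3_deregister_pull_msg(). A kernel client first registers the callback
+ * for a message type; a later ipa3_pull_msg() with a matching
+ * msg_type/msg_len invokes it to fill the caller's buffer:
+ *
+ *	static int my_pull_cb(char *buff, size_t count, u32 type)
+ *	{
+ *		... fill buff with up to count bytes, return bytes written ...
+ *	}
+ *
+ *	struct ipa_msg_meta meta = { .msg_type = type, .msg_len = len };
+ *	char buff[128];
+ *
+ *	ipa3_register_pull_msg(&meta, my_pull_cb);
+ *	ipa3_pull_msg(&meta, buff, sizeof(buff));
+ */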

+ 751 - 0
ipa/ipa_v3/ipa_mhi.c

@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi"
+
+
+#define IPA_MHI_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG("EXIT\n")
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 2
+
+/* bit #40 in address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_HOST_ADDR_COND(addr) \
+		((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
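+
+/*
+ * Worked example (assuming IPA_MHI_HOST_ADDR() sets bit #40): with
+ * assert_bit40 set, a host address of 0x0000_1234_5678 becomes
+ * 0x0100_1234_5678; with it cleared, the address passes through unchanged.
+ */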
+
+enum ipa3_mhi_polling_mode {
+	IPA_MHI_POLLING_MODE_DB_MODE,
+	IPA_MHI_POLLING_MODE_POLL_MODE,
+};
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
+	res = gsi_stop_channel(ep->gsi_chan_hdl);
+	if (res != 0 &&
+		res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPA_MHI_ERR("GSI stop channel failed %d\n",
+			res);
+		WARN_ON(1);
+		return false;
+	}
+
+	if (res == 0) {
+		IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
+			ep->gsi_chan_hdl);
+		return true;
+	}
+
+	return false;
+}
+
+static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int clnt_hdl;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	clnt_hdl = ipa3_get_ep_mapping(client);
+	if (clnt_hdl < 0)
+		return -EFAULT;
+
+	res = ipa3_reset_gsi_channel(clnt_hdl);
+	if (res) {
+		IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
+		return -EFAULT;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa3_mhi_reset_gsi_channel(client);
+	if (res) {
+		IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
+		ipa_assert();
+		return res;
+	}
+
+	res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx < 0) {
+		IPA_MHI_ERR("Invalid client %d\n", client);
+		return -EINVAL;
+	}
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
+		struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
+{
+	switch (ch_ctx_host->pollcfg) {
+	case 0:
+	/* set default polling configuration according to MHI spec */
+		if (IPA_CLIENT_IS_PROD(client))
+			return 7;
+		else
+			return (ring_size / 2) / 8;
+	default:
+		return ch_ctx_host->pollcfg;
+	}
+}
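+
+/*
+ * Worked example of the default above: for a consumer channel with a ring of
+ * 256 elements and pollcfg == 0, the returned value is (256 / 2) / 8 = 16;
+ * a producer channel always defaults to 7.
+ */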
+
+static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
+	int ipa_ep_idx, struct start_gsi_channel *params)
+{
+	int res = 0;
+	struct gsi_evt_ring_props ev_props;
+	struct ipa_mhi_msi_info *msi;
+	struct gsi_chan_props ch_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct ipa3_ep_context *ep;
+	const struct ipa_gsi_ep_config *ep_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	bool burst_mode_enabled = false;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	msi = params->msi;
+	ep_cfg = ipa3_get_gsi_ep_info(client);
+	if (!ep_cfg) {
+		IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
+		return -EPERM;
+	}
+
+	/* allocate event ring only for the first time pipe is connected */
+	if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+		memset(&ev_props, 0, sizeof(ev_props));
+		ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
+		ev_props.intr = GSI_INTR_MSI;
+		ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+		ev_props.ring_len = params->ev_ctx_host->rlen;
+		ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+				params->ev_ctx_host->rbase);
+		ev_props.int_modt = params->ev_ctx_host->intmodt *
+				IPA_SLEEP_CLK_RATE_KHZ;
+		ev_props.int_modc = params->ev_ctx_host->intmodc;
+		ev_props.intvec = ((msi->data & ~msi->mask) |
+				(params->ev_ctx_host->msivec & msi->mask));
+		ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
+				(((u64)msi->addr_hi << 32) | msi->addr_low));
+		ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
+				params->event_context_addr +
+				offsetof(struct ipa_mhi_ev_ctx, rp));
+		ev_props.exclusive = true;
+		ev_props.err_cb = params->ev_err_cb;
+		ev_props.user_data = params->channel;
+		ev_props.evchid_valid = true;
+		ev_props.evchid = params->evchid;
+		IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
+			ipa_ep_idx, ev_props.evchid);
+		res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
+			&ep->gsi_evt_ring_hdl);
+		if (res) {
+			IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
+			goto fail_alloc_evt;
+		}
+		IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
+				client,
+				ep->gsi_evt_ring_hdl);
+		*params->cached_gsi_evt_ring_hdl =
+			ep->gsi_evt_ring_hdl;
+
+	} else {
+		IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
+			*params->cached_gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	}
+
+	if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) {
+		IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
+			params->ev_ctx_host->wp);
+		goto fail_alloc_ch;
+	}
+
+	IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
+		ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
+	res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl,
+		params->ev_ctx_host->wp);
+	if (res) {
+		IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
+			res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
+		goto fail_alloc_ch;
+	}
+
+	memset(&ch_props, 0, sizeof(ch_props));
+	ch_props.prot = GSI_CHAN_PROT_MHI;
+	ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
+	ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	ch_props.ring_len = params->ch_ctx_host->rlen;
+	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+			params->ch_ctx_host->rbase);
+
+	/* Burst mode is not supported on DPL pipes */
+	if ((client != IPA_CLIENT_MHI_DPL_CONS) &&
+		(params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE)) {
+		burst_mode_enabled = true;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+		!burst_mode_enabled)
+		ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	else
+		ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+
+	ch_props.db_in_bytes = 1;
+	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	ch_props.low_weight = 1;
+	ch_props.prefetch_mode = ep_cfg->prefetch_mode;
+	ch_props.empty_lvl_threshold = ep_cfg->prefetch_threshold;
+	ch_props.err_cb = params->ch_err_cb;
+	ch_props.chan_user_data = params->channel;
+	res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
+			res);
+		goto fail_alloc_ch;
+	}
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
+			params->channel_context_addr +
+			offsetof(struct ipa_mhi_ch_ctx, wp));
+	ch_scratch.mhi.assert_bit40 = params->assert_bit40;
+
+	/*
+	 * Update scratch for MCS smart prefetch:
+	 * Starting with IPA 4.5, smart prefetch is implemented by H/W.
+	 * On IPA 4.0/4.1/4.2 we do not use MCS smart prefetch,
+	 * so keep the fields zero.
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		ch_scratch.mhi.max_outstanding_tre =
+			ep_cfg->ipa_if_tlv * ch_props.re_size;
+		ch_scratch.mhi.outstanding_threshold =
+			min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+	}
+	ch_scratch.mhi.oob_mod_threshold = 4;
+
+	if (burst_mode_enabled) {
+		ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
+		ch_scratch.mhi.polling_configuration =
+			ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
+				(ch_props.ring_len / ch_props.re_size));
+		ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
+	} else {
+		ch_scratch.mhi.burst_mode_enabled = false;
+	}
+	res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		ch_scratch);
+	if (res) {
+		IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
+			res);
+		goto fail_ch_scratch;
+	}
+
+	*params->mhi = ch_scratch.mhi;
+
+	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ep->ep_delay_set = true;
+		res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+		if (res)
+			IPA_MHI_ERR("client (ep: %d) failed result=%d\n",
+			ipa_ep_idx, res);
+		else
+			IPA_MHI_DBG("client (ep: %d) success\n", ipa_ep_idx);
+	} else {
+		ep->ep_delay_set = false;
+	}
+
+	IPA_MHI_DBG("Starting channel\n");
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
+		goto fail_ch_start;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_ch_start:
+fail_ch_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_ch:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+fail_alloc_evt:
+	return res;
+}
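+
+/*
+ * Worked example of the intvec composition above: with msi->data = 0x40,
+ * msi->mask = 0x3 and ev_ctx_host->msivec = 0x2, the programmed value is
+ * (0x40 & ~0x3) | (0x2 & 0x3) = 0x42, i.e. the host-assigned MSI vector is
+ * merged into the masked bits of the MSI data word.
+ */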
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int res;
+	struct gsi_device_scratch gsi_scratch;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) >
+		((ipa3_ctx->mhi_evid_limits[1] -
+		ipa3_ctx->mhi_evid_limits[0]) + 1)) {
+		IPAERR("Not enough event rings for MHI\n");
+		ipa_assert();
+		return -EINVAL;
+	}
+
+	/* Initialize IPA MHI engine */
+	gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
+	if (!gsi_ep_info) {
+		IPAERR("MHI PROD has no ep allocated\n");
+		ipa_assert();
+		return -EFAULT;
+	}
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+	gsi_scratch.mhi_base_chan_idx_valid = true;
+	gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
+		params->gsi.first_ch_idx;
+	res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&gsi_scratch);
+	if (res) {
+		IPA_MHI_ERR("failed to write device scratch %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	return res;
+}
+
+/**
+ * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel start,
+ * after the MHI engine was started.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int res;
+	enum ipa_client_type client;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];
+
+	client = in->sys->client;
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid == 1) {
+		IPA_MHI_ERR("EP already allocated.\n");
+		return -EPERM;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	ep->valid = 1;
+	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+	ep->client = client;
+	ep->client_notify = in->sys->notify;
+	ep->priv = in->sys->priv;
+	ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+	res = ipa_mhi_start_gsi_channel(client,
+					ipa_ep_idx, &in->start.gsi);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
+			res);
+		goto fail_start_channel;
+	}
+
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+			ipa_ep_idx);
+		goto fail_ep_cfg;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_ep_cfg;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_ep_cfg;
+		}
+		IPA_MHI_DBG("ep configuration successful\n");
+	} else {
+		IPA_MHI_DBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
+		ipa_ep_idx);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+
+fail_ep_cfg:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_start_channel:
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	return -EPERM;
+}
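+
+/*
+ * Minimal caller sketch (hypothetical flow): the MHI client driver fills the
+ * connect parameters from the host channel/event contexts and keeps the
+ * returned handle for ipa3_disconnect_mhi_pipe().
+ *
+ *	struct ipa_mhi_connect_params_internal in;
+ *	u32 clnt_hdl;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	... fill in.sys and in.start.gsi from the host contexts ...
+ *	if (!ipa3_connect_mhi_pipe(&in, &clnt_hdl))
+ *		... channel is started; save clnt_hdl for disconnect ...
+ */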
+
+/**
+ * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by the IPA MHI client driver on MHI channel reset,
+ * after the MHI channel was started. It does the following:
+ *	- Send command to uC/GSI to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int res;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("invalid handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("pipe was not connected %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	if (ep->ep_delay_set) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = false;
+		res = ipa3_cfg_ep_ctrl(clnt_hdl,
+			&ep_cfg_ctrl);
+		if (res) {
+			IPAERR
+			("client(ep:%d) failed to remove delay res=%d\n",
+				clnt_hdl, res);
+		} else {
+			IPADBG("client (ep: %d) delay removed\n",
+				clnt_hdl);
+			ep->ep_delay_set = false;
+		}
+	}
+
+	res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPAERR("gsi_dealloc_channel failed %d\n", res);
+		goto fail_reset_channel;
+	}
+
+	ep->valid = 0;
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_reset_channel:
+	return res;
+}
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	union __packed gsi_channel_scratch gsi_ch_scratch;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx < 0) {
+		IPA_MHI_ERR("Invalid client %d\n", client);
+		return -EINVAL;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (brstmode_enabled && !LPTransitionRejected) {
+
+		res = gsi_read_channel_scratch(ep->gsi_chan_hdl,
+			&gsi_ch_scratch);
+		if (res) {
+			IPA_MHI_ERR("read ch scratch fail %d\n", res);
+			return res;
+		}
+
+		/*
+		 * set polling mode bit to DB mode before
+		 * resuming the channel
+		 *
+		 * For MHI-->IPA pipes:
+		 * when resuming due to transition to M0,
+		 * set the polling mode bit to 0.
+		 * In other cases, restore its value from when the
+		 * channel was stopped.
+		 * Here, after a successful resume, the client moves to
+		 * M0 state, so the polling mode bit is set to 0 by default.
+		 *
+		 * For IPA-->MHI pipe:
+		 * always restore the polling mode bit.
+		 */
+		if (IPA_CLIENT_IS_PROD(client))
+			ch_scratch.mhi.polling_mode =
+				IPA_MHI_POLLING_MODE_DB_MODE;
+		else
+			ch_scratch.mhi.polling_mode =
+				gsi_ch_scratch.mhi.polling_mode;
+
+		/* Use GSI update API to not affect non-SWI fields
+		 * inside the scratch while in suspend-resume operation
+		 */
+		res = gsi_update_mhi_channel_scratch(
+			ep->gsi_chan_hdl, ch_scratch.mhi);
+		if (res) {
+			IPA_MHI_ERR("write ch scratch fail %d\n"
+				, res);
+			return res;
+		}
+	}
+
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed to resume channel error %d\n", res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info)
+{
+	int ipa_ep_idx;
+	int res;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx < 0) {
+		IPA_MHI_ERR("Invalid client %d\n", client);
+		return -EINVAL;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
+	if (res) {
+		IPA_MHI_ERR("gsi_query_channel_info failed\n");
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
+{
+	u32 aggr_state_active;
+	int ipa_ep_idx;
+
+	aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+	IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		ipa_assert();
+		return false;
+	}
+
+	if ((1 << ipa_ep_idx) & aggr_state_active)
+		return true;
+
+	return false;
+}
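+
+/*
+ * Worked example of the bitmap test above: if the client maps to endpoint 5
+ * and IPA_STATE_AGGR_ACTIVE reads 0x20, bit 5 is set and the function
+ * reports an open aggregation frame on that pipe.
+ */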
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx < 0) {
+		IPA_MHI_ERR("Invalid client %d\n", client);
+		return -EINVAL;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(client);
+
+	IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR(" failed to reset evt ring %lu, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_dealloc_evt_ring(
+		ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR("dealloc evt ring %lu failed, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(client);
+	return 0;
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(client);
+	return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");

+ 1088 - 0
ipa/ipa_v3/ipa_mhi_proxy.c

@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mhi.h>
+#include "ipa_qmi_service.h"
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+
+#define IMP_DRV_NAME "ipa_mhi_proxy"
+
+#define IMP_DBG(fmt, args...) \
+	do { \
+		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IMP_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IMP_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IMP_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IMP_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IMP_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IMP_ERR(fmt, args...) \
+	do { \
+		pr_err(IMP_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IMP_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IMP_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IMP_FUNC_ENTRY() \
+	IMP_DBG_LOW("ENTRY\n")
+#define IMP_FUNC_EXIT() \
+	IMP_DBG_LOW("EXIT\n")
+
+#define IMP_IPA_UC_UL_CH_n 0
+#define IMP_IPA_UC_UL_EV_n 1
+#define IMP_IPA_UC_DL_CH_n 2
+#define IMP_IPA_UC_DL_EV_n 3
+#define IMP_IPA_UC_m 1
+
+/* each pair of UL/DL channels are defined below */
+static const struct mhi_device_id mhi_driver_match_table[] = {
+	{ .chan = "IP_HW_OFFLOAD_0" },
+	{},
+};
+
+static int imp_mhi_probe_cb(struct mhi_device *, const struct mhi_device_id *);
+static void imp_mhi_remove_cb(struct mhi_device *);
+static void imp_mhi_status_cb(struct mhi_device *, enum MHI_CB);
+
+static struct mhi_driver mhi_driver = {
+	.id_table = mhi_driver_match_table,
+	.probe = imp_mhi_probe_cb,
+	.remove = imp_mhi_remove_cb,
+	.status_cb = imp_mhi_status_cb,
+	.driver = {
+		.name = IMP_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+struct imp_channel_context_type {
+	u32 chstate:8;
+	u32 brsmode:2;
+	u32 pollcfg:6;
+	u32 reserved:16;
+
+	u32 chtype;
+
+	u32 erindex;
+
+	u64 rbase;
+
+	u64 rlen;
+
+	u64 rpp;
+
+	u64 wpp;
+} __packed;
+
+struct imp_event_context_type {
+	u32 reserved:8;
+	u32 intmodc:8;
+	u32 intmodt:16;
+
+	u32 ertype;
+
+	u32 msivec;
+
+	u64 rbase;
+
+	u64 rlen;
+
+	u64 rpp;
+
+	u64 wpp;
+} __packed;
+
+struct imp_iova_addr {
+	dma_addr_t base;
+	unsigned int size;
+};
+
+struct imp_dev_info {
+	struct platform_device *pdev;
+	bool smmu_enabled;
+	struct imp_iova_addr ctrl;
+	struct imp_iova_addr data;
+	u32 chdb_base;
+	u32 erdb_base;
+};
+
+struct imp_event_props {
+	u16 id;
+	phys_addr_t doorbell;
+	u16 uc_mbox_n;
+	struct imp_event_context_type ev_ctx;
+};
+
+struct imp_event {
+	struct imp_event_props props;
+};
+
+struct imp_channel_props {
+	enum dma_data_direction dir;
+	u16 id;
+	phys_addr_t doorbell;
+	u16 uc_mbox_n;
+	struct imp_channel_context_type ch_ctx;
+
+};
+
+struct imp_channel {
+	struct imp_channel_props props;
+	struct imp_event event;
+};
+
+enum imp_state {
+	IMP_INVALID = 0,
+	IMP_PROBED,
+	IMP_READY,
+	IMP_STARTED
+};
+
+struct imp_qmi_cache {
+	struct ipa_mhi_ready_indication_msg_v01 ready_ind;
+	struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req;
+	struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp;
+	struct ipa_mhi_clk_vote_resp_msg_v01 clk_vote_resp;
+};
+
+struct imp_mhi_driver {
+	struct mhi_device *mhi_dev;
+	struct imp_channel ul_chan;
+	struct imp_channel dl_chan;
+};
+
+struct imp_context {
+	struct imp_dev_info dev_info;
+	struct imp_mhi_driver md;
+	struct mutex mutex;
+	struct mutex lpm_mutex;
+	enum imp_state state;
+	bool in_lpm;
+	bool lpm_disabled;
+	struct imp_qmi_cache qmi;
+
+};
+
+static struct imp_context *imp_ctx;
+
+static void _populate_smmu_info(struct ipa_mhi_ready_indication_msg_v01 *req)
+{
+	req->smmu_info_valid = true;
+	req->smmu_info.iova_ctl_base_addr = imp_ctx->dev_info.ctrl.base;
+	req->smmu_info.iova_ctl_size = imp_ctx->dev_info.ctrl.size;
+	req->smmu_info.iova_data_base_addr = imp_ctx->dev_info.data.base;
+	req->smmu_info.iova_data_size = imp_ctx->dev_info.data.size;
+}
+
+static void imp_mhi_trigger_ready_ind(void)
+{
+	struct ipa_mhi_ready_indication_msg_v01 *req
+		= &imp_ctx->qmi.ready_ind;
+	int ret;
+	struct imp_channel *ch;
+	struct ipa_mhi_ch_init_info_type_v01 *ch_info;
+
+	IMP_FUNC_ENTRY();
+	if (imp_ctx->state != IMP_PROBED) {
+		IMP_ERR("invalid state %d\n", imp_ctx->state);
+		goto exit;
+	}
+
+	if (imp_ctx->dev_info.smmu_enabled)
+		_populate_smmu_info(req);
+
+	req->ch_info_arr_len = 0;
+	BUILD_BUG_ON(QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 < 2);
+
+	/* UL channel */
+	ch = &imp_ctx->md.ul_chan;
+	ch_info = &req->ch_info_arr[req->ch_info_arr_len];
+
+	ch_info->ch_id = ch->props.id;
+	ch_info->direction_type = ch->props.dir;
+	ch_info->er_id = ch->event.props.id;
+
+	/* uC is a doorbell proxy between local Q6 and remote Q6 */
+	ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		IMP_IPA_UC_m,
+		ch->props.uc_mbox_n);
+
+	ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		IMP_IPA_UC_m,
+		ch->event.props.uc_mbox_n);
+	req->ch_info_arr_len++;
+
+	/* DL channel */
+	ch = &imp_ctx->md.dl_chan;
+	ch_info = &req->ch_info_arr[req->ch_info_arr_len];
+
+	ch_info->ch_id = ch->props.id;
+	ch_info->direction_type = ch->props.dir;
+	ch_info->er_id = ch->event.props.id;
+
+	/* uC is a doorbell proxy between local Q6 and remote Q6 */
+	ch_info->ch_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		IMP_IPA_UC_m,
+		ch->props.uc_mbox_n);
+
+	ch_info->er_doorbell_addr = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		IMP_IPA_UC_m,
+		ch->event.props.uc_mbox_n);
+	req->ch_info_arr_len++;
+
+	IMP_DBG("sending IND to modem\n");
+	ret = ipa3_qmi_send_mhi_ready_indication(req);
+	if (ret) {
+		IMP_ERR("failed to send ready indication to modem %d\n", ret);
+		return;
+	}
+
+	imp_ctx->state = IMP_READY;
+
+exit:
+	IMP_FUNC_EXIT();
+}
+
+static struct imp_channel *imp_get_ch_by_id(u16 id)
+{
+	if (imp_ctx->md.ul_chan.props.id == id)
+		return &imp_ctx->md.ul_chan;
+
+	if (imp_ctx->md.dl_chan.props.id == id)
+		return &imp_ctx->md.dl_chan;
+
+	return NULL;
+}
+
+static struct ipa_mhi_er_info_type_v01 *
+	_find_ch_in_er_info_arr(struct ipa_mhi_alloc_channel_req_msg_v01 *req,
+	u16 id)
+{
+	int i;
+
+	if (req->er_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01)
+		return NULL;
+
+	for (i = 0; i < req->er_info_arr_len; i++)
+		if (req->er_info_arr[i].er_id == id)
+			return &req->er_info_arr[i];
+	return NULL;
+}
+
+/* round addresses for closest page per SMMU requirements */
+static inline void imp_smmu_round_to_page(uint64_t iova, uint64_t pa,
+	uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
+{
+	*iova_p = rounddown(iova, PAGE_SIZE);
+	*pa_p = rounddown(pa, PAGE_SIZE);
+	*size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
+}
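+
+/*
+ * Worked example of the rounding above (4K pages): iova = 0x10001234,
+ * pa = 0x20001234, size = 0x10 yields *iova_p = 0x10001000,
+ * *pa_p = 0x20001000 and *size_p = roundup(0x10 + 0x234, 0x1000) = 0x1000,
+ * so the mapping covers the whole page containing the buffer.
+ */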
+
+static void __map_smmu_info(struct device *dev,
+	struct imp_iova_addr *partition, int num_mapping,
+	struct ipa_mhi_mem_addr_info_type_v01 *map_info,
+	bool map)
+{
+	int i;
+	struct iommu_domain *domain;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain) {
+		IMP_ERR("domain is NULL for dev\n");
+		return;
+	}
+
+	for (i = 0; i < num_mapping; i++) {
+		int prot = IOMMU_READ | IOMMU_WRITE;
+		u32 ipa_base = ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst;
+		u32 ipa_size = ipa3_ctx->ipa_wrapper_size;
+
+		imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa,
+			map_info[i].size, &iova_p, &pa_p, &size_p);
+
+		if (map) {
+			/* boundary check */
+			WARN_ON(partition->base > iova_p ||
+				(partition->base + partition->size) <
+				(iova_p + size_p));
+
+			/* for the IPA uC MBOX we need to map with device type */
+			if (pa_p - ipa_base < ipa_size)
+				prot |= IOMMU_MMIO;
+
+			IMP_DBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			iommu_map(domain,
+				iova_p, pa_p, size_p, prot);
+		} else {
+			IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			iommu_unmap(domain, iova_p, size_p);
+		}
+	}
+}
+
+static int __imp_configure_mhi_device(
+	struct ipa_mhi_alloc_channel_req_msg_v01 *req,
+	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp)
+{
+	struct mhi_buf ch_config[2];
+	int i;
+	struct ipa_mhi_er_info_type_v01 *er_info;
+	struct imp_channel *ch;
+	int ridx = 0;
+	int ret;
+
+	IMP_FUNC_ENTRY();
+
+	/* configure MHI */
+	for (i = 0; i < req->tr_info_arr_len; i++) {
+		ch = imp_get_ch_by_id(req->tr_info_arr[i].ch_id);
+		if (!ch) {
+			IMP_ERR("unknown channel %d\n",
+				req->tr_info_arr[i].ch_id);
+			resp->alloc_resp_arr[ridx].ch_id =
+				req->tr_info_arr[i].ch_id;
+			resp->alloc_resp_arr[ridx].is_success = 0;
+			ridx++;
+			resp->alloc_resp_arr_len = ridx;
+			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+			/* return INCOMPATIBLE_STATE in any case */
+			resp->resp.error =
+				IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+			return -EINVAL;
+		}
+
+		/* populate CCA */
+		if (req->tr_info_arr[i].brst_mode_type ==
+			QMI_IPA_BURST_MODE_ENABLED_V01)
+			ch->props.ch_ctx.brsmode = 3;
+		else if (req->tr_info_arr[i].brst_mode_type ==
+			QMI_IPA_BURST_MODE_DISABLED_V01)
+			ch->props.ch_ctx.brsmode = 2;
+		else
+			ch->props.ch_ctx.brsmode = 0;
+
+		ch->props.ch_ctx.pollcfg = req->tr_info_arr[i].poll_cfg;
+		ch->props.ch_ctx.chtype = ch->props.dir;
+		ch->props.ch_ctx.erindex = ch->event.props.id;
+		ch->props.ch_ctx.rbase = req->tr_info_arr[i].ring_iova;
+		ch->props.ch_ctx.rlen = req->tr_info_arr[i].ring_len;
+		ch->props.ch_ctx.rpp = req->tr_info_arr[i].rp;
+		ch->props.ch_ctx.wpp = req->tr_info_arr[i].wp;
+
+		ch_config[0].buf = &ch->props.ch_ctx;
+		ch_config[0].len = sizeof(ch->props.ch_ctx);
+		ch_config[0].name = "CCA";
+
+		/* populate ECA */
+		er_info = _find_ch_in_er_info_arr(req, ch->event.props.id);
+		if (!er_info) {
+			IMP_ERR("no event ring for ch %d\n",
+				req->tr_info_arr[i].ch_id);
+			resp->alloc_resp_arr[ridx].ch_id =
+				req->tr_info_arr[i].ch_id;
+			resp->alloc_resp_arr[ridx].is_success = 0;
+			ridx++;
+			resp->alloc_resp_arr_len = ridx;
+			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+			return -EINVAL;
+		}
+
+		ch->event.props.ev_ctx.intmodc = er_info->intmod_count;
+		ch->event.props.ev_ctx.intmodt = er_info->intmod_cycles;
+		ch->event.props.ev_ctx.ertype = 1;
+		ch->event.props.ev_ctx.msivec = er_info->msi_addr;
+		ch->event.props.ev_ctx.rbase = er_info->ring_iova;
+		ch->event.props.ev_ctx.rlen = er_info->ring_len;
+		ch->event.props.ev_ctx.rpp = er_info->rp;
+		ch->event.props.ev_ctx.wpp = er_info->wp;
+		ch_config[1].buf = &ch->event.props.ev_ctx;
+		ch_config[1].len = sizeof(ch->event.props.ev_ctx);
+		ch_config[1].name = "ECA";
+
+		IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id);
+		ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir,
+			ch_config, 2);
+		/* configure mhi-host, no need check mhi state */
+		if (ret) {
+			IMP_ERR("mhi_device_configure failed for ch %d\n",
+				req->tr_info_arr[i].ch_id);
+			resp->alloc_resp_arr[ridx].ch_id =
+				req->tr_info_arr[i].ch_id;
+			resp->alloc_resp_arr[ridx].is_success = 0;
+			ridx++;
+			resp->alloc_resp_arr_len = ridx;
+			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+			return -EINVAL;
+		}
+	}
+
+	IMP_FUNC_EXIT();
+
+	return 0;
+}
+
+/**
+ * imp_handle_allocate_channel_req() - Allocate new MHI channels
+ * @req: [in] channel allocation request
+ *
+ * Allocates the MHI channels and starts them.
+ *
+ * Return: pointer to the QMI response message
+ */
+struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
+		struct ipa_mhi_alloc_channel_req_msg_v01 *req)
+{
+	int ret;
+	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp =
+		&imp_ctx->qmi.alloc_ch_resp;
+
+	IMP_FUNC_ENTRY();
+
+	mutex_lock(&imp_ctx->mutex);
+
+	memset(resp, 0, sizeof(*resp));
+
+	if (imp_ctx->state != IMP_READY) {
+		IMP_ERR("invalid state %d\n", imp_ctx->state);
+		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+
+	/* cache the req */
+	memcpy(&imp_ctx->qmi.alloc_ch_req, req, sizeof(*req));
+
+	if (req->tr_info_arr_len > QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01) {
+		IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len);
+		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+		resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01;
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+
+	if ((req->ctrl_addr_map_info_len == 0 ||
+	     req->data_addr_map_info_len == 0) &&
+	     imp_ctx->dev_info.smmu_enabled) {
+		IMP_ERR("no mapping provided, but smmu is enabled\n");
+		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+
+	if (imp_ctx->dev_info.smmu_enabled) {
+		/* map CTRL */
+		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+			&imp_ctx->dev_info.ctrl,
+			req->ctrl_addr_map_info_len,
+			req->ctrl_addr_map_info,
+			true);
+
+		/* map DATA */
+		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+			&imp_ctx->dev_info.data,
+			req->data_addr_map_info_len,
+			req->data_addr_map_info,
+			true);
+	}
+
+	resp->alloc_resp_arr_valid = true;
+	ret = __imp_configure_mhi_device(req, resp);
+	if (ret)
+		goto fail_smmu;
+
+	IMP_DBG("Starting MHI channels %d and %d\n",
+		imp_ctx->md.ul_chan.props.id,
+		imp_ctx->md.dl_chan.props.id);
+	ret = mhi_prepare_for_transfer(imp_ctx->md.mhi_dev);
+	if (ret) {
+		IMP_ERR("mhi_prepare_for_transfer failed %d\n", ret);
+		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+			.ch_id = imp_ctx->md.ul_chan.props.id;
+		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+			.is_success = 0;
+		resp->alloc_resp_arr_len++;
+		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+			.ch_id = imp_ctx->md.dl_chan.props.id;
+		resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+			.is_success = 0;
+		resp->alloc_resp_arr_len++;
+		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+		/* return INCOMPATIBLE_STATE in any case */
+		resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+		goto fail_smmu;
+	}
+
+	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+		.ch_id = imp_ctx->md.ul_chan.props.id;
+	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+		.is_success = 1;
+	resp->alloc_resp_arr_len++;
+
+	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+		.ch_id = imp_ctx->md.dl_chan.props.id;
+	resp->alloc_resp_arr[resp->alloc_resp_arr_len]
+		.is_success = 1;
+	resp->alloc_resp_arr_len++;
+
+	imp_ctx->state = IMP_STARTED;
+	mutex_unlock(&imp_ctx->mutex);
+	IMP_FUNC_EXIT();
+
+	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	return resp;
+
+fail_smmu:
+	if (imp_ctx->dev_info.smmu_enabled) {
+		/* unmap CTRL */
+		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+			&imp_ctx->dev_info.ctrl,
+			req->ctrl_addr_map_info_len,
+			req->ctrl_addr_map_info,
+			false);
+
+		/* unmap DATA */
+		__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+			&imp_ctx->dev_info.data,
+			req->data_addr_map_info_len,
+			req->data_addr_map_info,
+			false);
+	}
+	mutex_unlock(&imp_ctx->mutex);
+	return resp;
+}
+
+/**
+ * imp_handle_vote_req() - Vote for MHI / PCIe clocks
+ * @vote: [in] true to hold a vote and prevent low power mode on MHI,
+ *	false to release it
+ *
+ * Return: pointer to the QMI response message
+ */
+struct ipa_mhi_clk_vote_resp_msg_v01
+	*imp_handle_vote_req(bool vote)
+{
+	int ret;
+	struct ipa_mhi_clk_vote_resp_msg_v01 *resp =
+	&imp_ctx->qmi.clk_vote_resp;
+
+	IMP_DBG_LOW("vote %d\n", vote);
+	memset(resp, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
+	resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+
+	mutex_lock(&imp_ctx->mutex);
+
+	/*
+	 * returning success for clock unvote request - since it could
+	 * be 5G modem SSR scenario where clocks are already OFF.
+	 */
+	if (!vote && imp_ctx->state == IMP_INVALID) {
+		IMP_DBG("Unvote in Invalid state, no op for clock unvote\n");
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+
+	if (imp_ctx->state != IMP_STARTED) {
+		IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state);
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+
+	if (vote == imp_ctx->lpm_disabled) {
+		IMP_ERR("already voted/devoted %d\n", vote);
+		mutex_unlock(&imp_ctx->mutex);
+		return resp;
+	}
+	mutex_unlock(&imp_ctx->mutex);
+
+	/*
+	 * Unlock the mutex before calling into mhi for clock vote
+	 * to avoid deadlock on imp mutex.
+	 * Calls into mhi are synchronous and imp callbacks are
+	 * executed from mhi context.
+	 */
+	if (vote) {
+		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev,
+			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
+		if (ret) {
+			IMP_ERR("mhi_sync_get failed %d\n", ret);
+			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+			/* return INCOMPATIBLE_STATE in any case */
+			resp->resp.error =
+					IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+			return resp;
+		}
+	} else {
+		mhi_device_put(imp_ctx->md.mhi_dev,
+			MHI_VOTE_BUS | MHI_VOTE_DEVICE);
+	}
+
+	mutex_lock(&imp_ctx->mutex);
+	if (vote)
+		imp_ctx->lpm_disabled = true;
+	else
+		imp_ctx->lpm_disabled = false;
+	mutex_unlock(&imp_ctx->mutex);
+
+	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	return resp;
+}
+
+static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out)
+{
+	u32 iova_mapping[2];
+	struct device_node *of_node = imp_ctx->dev_info.pdev->dev.of_node;
+
+	if (of_property_read_u32_array(of_node, node, iova_mapping, 2)) {
+		IMP_DBG("failed to read of_node %s\n", node);
+		return -EINVAL;
+	}
+
+	out->base = iova_mapping[0];
+	out->size = iova_mapping[1];
+	IMP_DBG("%s: base: 0x%pad size: 0x%x\n", node, &out->base, out->size);
+
+	return 0;
+}
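+
+/*
+ * Illustrative DT snippet (hypothetical values) matching the property format
+ * parsed above: one <base size> pair per IOVA partition.
+ *
+ *	qcom,ctrl-iova = <0x10000000 0x100000>;
+ *	qcom,data-iova = <0x20000000 0x800000>;
+ */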
+
+static void imp_mhi_shutdown(void)
+{
+	struct ipa_mhi_cleanup_req_msg_v01 req = { 0 };
+
+	IMP_FUNC_ENTRY();
+
+	if (imp_ctx->state == IMP_STARTED ||
+		imp_ctx->state == IMP_READY) {
+		req.cleanup_valid = true;
+		req.cleanup = true;
+		ipa3_qmi_send_mhi_cleanup_request(&req);
+		if (imp_ctx->dev_info.smmu_enabled) {
+			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
+				= &imp_ctx->qmi.alloc_ch_req;
+
+			/* unmap CTRL */
+			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+				&imp_ctx->dev_info.ctrl,
+				creq->ctrl_addr_map_info_len,
+				creq->ctrl_addr_map_info,
+				false);
+
+			/* unmap DATA */
+			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+				&imp_ctx->dev_info.data,
+				creq->data_addr_map_info_len,
+				creq->data_addr_map_info,
+				false);
+		}
+		if (imp_ctx->lpm_disabled) {
+			mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
+			imp_ctx->lpm_disabled = false;
+		}
+
+		/* unmap MHI doorbells from IPA uC SMMU */
+		if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+			struct ipa_smmu_cb_ctx *cb =
+				ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
+				imp_ctx->dev_info.chdb_base, PAGE_SIZE,
+				&iova_p, &pa_p, &size_p);
+
+			iommu_unmap(cb->iommu_domain, iova_p, size_p);
+		}
+	}
+	if (!imp_ctx->in_lpm &&
+		(imp_ctx->state == IMP_READY ||
+			imp_ctx->state == IMP_STARTED)) {
+		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
+	}
+	imp_ctx->in_lpm = false;
+	imp_ctx->state = IMP_PROBED;
+
+	IMP_FUNC_EXIT();
+}
+
+static int imp_mhi_probe_cb(struct mhi_device *mhi_dev,
+	const struct mhi_device_id *id)
+{
+	struct imp_channel *ch;
+	struct imp_event *ev;
+	int ret;
+
+	IMP_FUNC_ENTRY();
+
+	if (id != &mhi_driver_match_table[0]) {
+		IMP_ERR("only chan=%s is supported for now\n",
+			mhi_driver_match_table[0].chan);
+		return -EPERM;
+	}
+
+	/* vote for IPA clock. IPA clock will be devoted when MHI enters LPM */
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");
+
+	imp_ctx->md.mhi_dev = mhi_dev;
+
+	mutex_lock(&imp_ctx->mutex);
+	/* store UL channel properties */
+	ch = &imp_ctx->md.ul_chan;
+	ev = &imp_ctx->md.ul_chan.event;
+
+	ch->props.id = mhi_dev->ul_chan_id;
+	ch->props.dir = DMA_TO_DEVICE;
+	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
+	ch->props.uc_mbox_n = IMP_IPA_UC_UL_CH_n;
+	IMP_DBG("ul ch id %d doorbell 0x%pa uc_mbox_n %d\n",
+		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);
+
+	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
+		ch->props.uc_mbox_n);
+	if (ret)
+		goto fail;
+	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
+			ch->props.uc_mbox_n);
+
+	ev->props.id = mhi_dev->ul_event_id;
+	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
+	ev->props.uc_mbox_n = IMP_IPA_UC_UL_EV_n;
+	IMP_DBG("allocated ev %d\n", ev->props.id);
+
+	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
+		ev->props.uc_mbox_n);
+	if (ret)
+		goto fail;
+	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell,
+		ev->props.uc_mbox_n);
+
+	/* store DL channel properties */
+	ch = &imp_ctx->md.dl_chan;
+	ev = &imp_ctx->md.dl_chan.event;
+
+	ch->props.dir = DMA_FROM_DEVICE;
+	ch->props.id = mhi_dev->dl_chan_id;
+	ch->props.doorbell = imp_ctx->dev_info.chdb_base + ch->props.id * 8;
+	ch->props.uc_mbox_n = IMP_IPA_UC_DL_CH_n;
+	IMP_DBG("dl ch id %d doorbell 0x%pa uc_mbox_n %d\n",
+		ch->props.id, &ch->props.doorbell, ch->props.uc_mbox_n);
+
+	ret = ipa3_uc_send_remote_ipa_info(ch->props.doorbell,
+		ch->props.uc_mbox_n);
+	if (ret)
+		goto fail;
+	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ch->props.doorbell,
+		ch->props.uc_mbox_n);
+
+	ev->props.id = mhi_dev->dl_event_id;
+	ev->props.doorbell = imp_ctx->dev_info.erdb_base + ev->props.id * 8;
+	ev->props.uc_mbox_n = IMP_IPA_UC_DL_EV_n;
+	IMP_DBG("allocated ev %d\n", ev->props.id);
+
+	ret = ipa3_uc_send_remote_ipa_info(ev->props.doorbell,
+		ev->props.uc_mbox_n);
+	if (ret)
+		goto fail;
+	IMP_DBG("mapped ch db 0x%pad to mbox %d\n", &ev->props.doorbell,
+		ev->props.uc_mbox_n);
+
+	/*
+	 * Map MHI doorbells to IPA uC SMMU.
+	 * Both channel and event doorbells reside in a single page.
+	 */
+	if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		struct ipa_smmu_cb_ctx *cb =
+			ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+		unsigned long iova_p;
+		phys_addr_t pa_p;
+		u32 size_p;
+
+		imp_smmu_round_to_page(imp_ctx->dev_info.chdb_base,
+			imp_ctx->dev_info.chdb_base, PAGE_SIZE,
+			&iova_p, &pa_p, &size_p);
+
+		ret = ipa3_iommu_map(cb->iommu_domain, iova_p, pa_p, size_p,
+			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		if (ret)
+			goto fail;
+	}
+
+	imp_mhi_trigger_ready_ind();
+
+	mutex_unlock(&imp_ctx->mutex);
+
+	IMP_FUNC_EXIT();
+	return 0;
+
+fail:
+	mutex_unlock(&imp_ctx->mutex);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
+	return ret;
+}
+
+static void imp_mhi_remove_cb(struct mhi_device *mhi_dev)
+{
+	IMP_FUNC_ENTRY();
+
+	mutex_lock(&imp_ctx->mutex);
+	imp_mhi_shutdown();
+	mutex_unlock(&imp_ctx->mutex);
+	IMP_FUNC_EXIT();
+}
+
+static void imp_mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
+{
+	IMP_DBG("%d\n", mhi_cb);
+
+	mutex_lock(&imp_ctx->lpm_mutex);
+	if (mhi_dev != imp_ctx->md.mhi_dev) {
+		IMP_DBG("ignoring secondary callbacks\n");
+		mutex_unlock(&imp_ctx->lpm_mutex);
+		return;
+	}
+
+	switch (mhi_cb) {
+	case MHI_CB_IDLE:
+		break;
+	case MHI_CB_LPM_ENTER:
+		if (imp_ctx->state == IMP_STARTED) {
+			if (!imp_ctx->in_lpm) {
+				IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
+				imp_ctx->in_lpm = true;
+			} else {
+				IMP_ERR("already in LPM\n");
+			}
+		}
+		break;
+	case MHI_CB_LPM_EXIT:
+		if (imp_ctx->state == IMP_STARTED) {
+			if (imp_ctx->in_lpm) {
+				IPA_ACTIVE_CLIENTS_INC_SPECIAL("IMP");
+				imp_ctx->in_lpm = false;
+			} else {
+				IMP_ERR("not in LPM\n");
+			}
+		}
+		break;
+
+	case MHI_CB_EE_RDDM:
+	case MHI_CB_PENDING_DATA:
+	default:
+		IMP_ERR("unexpected event %d\n", mhi_cb);
+		break;
+	}
+	mutex_unlock(&imp_ctx->lpm_mutex);
+}
+
+static int imp_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	IMP_FUNC_ENTRY();
+
+	if (ipa3_uc_state_check()) {
+		IMP_DBG("uC not ready yet\n");
+		return -EPROBE_DEFER;
+	}
+
+	imp_ctx->dev_info.pdev = pdev;
+	imp_ctx->dev_info.smmu_enabled = true;
+	ret = imp_read_iova_from_dtsi("qcom,ctrl-iova",
+		&imp_ctx->dev_info.ctrl);
+	if (ret)
+		imp_ctx->dev_info.smmu_enabled = false;
+
+	ret = imp_read_iova_from_dtsi("qcom,data-iova",
+		&imp_ctx->dev_info.data);
+	if (ret)
+		imp_ctx->dev_info.smmu_enabled = false;
+
+	IMP_DBG("smmu_enabled=%d\n", imp_ctx->dev_info.smmu_enabled);
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
+		&imp_ctx->dev_info.chdb_base)) {
+		IMP_ERR("failed to read of_node %s\n", "qcom,mhi-chdb-base");
+		return -EINVAL;
+	}
+	IMP_DBG("chdb-base=0x%x\n", imp_ctx->dev_info.chdb_base);
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
+		&imp_ctx->dev_info.erdb_base)) {
+		IMP_ERR("failed to read of_node %s\n", "qcom,mhi-erdb-base");
+		return -EINVAL;
+	}
+	IMP_DBG("erdb-base=0x%x\n", imp_ctx->dev_info.erdb_base);
+
+	imp_ctx->state = IMP_PROBED;
+	ret = mhi_driver_register(&mhi_driver);
+	if (ret) {
+		IMP_ERR("mhi_driver_register failed %d\n", ret);
+		return ret;
+	}
+
+	IMP_FUNC_EXIT();
+	return 0;
+}
+
+static int imp_remove(struct platform_device *pdev)
+{
+	IMP_FUNC_ENTRY();
+	mhi_driver_unregister(&mhi_driver);
+	mutex_lock(&imp_ctx->mutex);
+	if (!imp_ctx->in_lpm && (imp_ctx->state == IMP_READY ||
+		imp_ctx->state == IMP_STARTED)) {
+		IMP_DBG("devote IMP with state= %d\n", imp_ctx->state);
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP");
+	}
+	imp_ctx->lpm_disabled = false;
+	imp_ctx->state = IMP_INVALID;
+	mutex_unlock(&imp_ctx->mutex);
+
+	mutex_lock(&imp_ctx->lpm_mutex);
+	imp_ctx->in_lpm = false;
+	mutex_unlock(&imp_ctx->lpm_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id imp_dt_match[] = {
+	{ .compatible = "qcom,ipa-mhi-proxy" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, imp_dt_match);
+
+static struct platform_driver ipa_mhi_proxy_driver = {
+	.driver = {
+		.name = "ipa_mhi_proxy",
+		.of_match_table = imp_dt_match,
+	},
+	.probe = imp_probe,
+	.remove = imp_remove,
+};
+
+/**
+ * imp_handle_modem_ready() - Registers the IMP platform driver
+ *
+ * This function is called after the modem is loaded and the QMI handshake is
+ * done. IMP registers itself as a platform driver, and on a supported device
+ * the probe function will be called.
+ *
+ * Return: None
+ */
+void imp_handle_modem_ready(void)
+{
+
+	if (!imp_ctx) {
+		imp_ctx = kzalloc(sizeof(*imp_ctx), GFP_KERNEL);
+		if (!imp_ctx)
+			return;
+
+		mutex_init(&imp_ctx->mutex);
+		mutex_init(&imp_ctx->lpm_mutex);
+	}
+
+	if (imp_ctx->state != IMP_INVALID) {
+		IMP_ERR("unexpected state %d\n", imp_ctx->state);
+		return;
+	}
+
+	IMP_DBG("register platform device\n");
+	platform_driver_register(&ipa_mhi_proxy_driver);
+}
+
+/**
+ * imp_handle_modem_shutdown() - Handles modem SSR
+ *
+ * Performs MHI cleanup when modem is going to SSR (Subsystem Restart).
+ *
+ * Return: None
+ */
+void imp_handle_modem_shutdown(void)
+{
+	IMP_FUNC_ENTRY();
+
+	if (!imp_ctx)
+		return;
+
+	mutex_lock(&imp_ctx->mutex);
+
+	if (imp_ctx->state == IMP_INVALID) {
+		mutex_unlock(&imp_ctx->mutex);
+		return;
+	}
+	if (imp_ctx->state == IMP_STARTED) {
+		mhi_unprepare_from_transfer(imp_ctx->md.mhi_dev);
+		imp_ctx->state = IMP_READY;
+	}
+
+	if (imp_ctx->state == IMP_READY) {
+		if (imp_ctx->dev_info.smmu_enabled) {
+			struct ipa_mhi_alloc_channel_req_msg_v01 *creq
+				= &imp_ctx->qmi.alloc_ch_req;
+
+			/* unmap CTRL */
+			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+				&imp_ctx->dev_info.ctrl,
+				creq->ctrl_addr_map_info_len,
+				creq->ctrl_addr_map_info,
+				false);
+
+			/* unmap DATA */
+			__map_smmu_info(imp_ctx->md.mhi_dev->dev.parent,
+				&imp_ctx->dev_info.data,
+				creq->data_addr_map_info_len,
+				creq->data_addr_map_info,
+				false);
+		}
+	}
+
+	mutex_unlock(&imp_ctx->mutex);
+
+	IMP_FUNC_EXIT();
+
+	platform_driver_unregister(&ipa_mhi_proxy_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI Proxy Driver");

+ 49 - 0
ipa/ipa_v3/ipa_mhi_proxy.h

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __IMP_H_
+#define __IMP_H_
+
+#ifdef CONFIG_IPA3_MHI_PROXY
+
+#include "ipa_qmi_service.h"
+
+void imp_handle_modem_ready(void);
+
+struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
+	struct ipa_mhi_alloc_channel_req_msg_v01 *req);
+
+struct ipa_mhi_clk_vote_resp_msg_v01 *imp_handle_vote_req(bool vote);
+
+void imp_handle_modem_shutdown(void);
+
+#else /* CONFIG_IPA3_MHI_PROXY */
+
+static inline void imp_handle_modem_ready(void)
+{
+
+}
+
+static inline struct ipa_mhi_alloc_channel_resp_msg_v01
+	*imp_handle_allocate_channel_req(
+		struct ipa_mhi_alloc_channel_req_msg_v01 *req)
+{
+	return NULL;
+}
+
+static inline struct ipa_mhi_clk_vote_resp_msg_v01
+	*imp_handle_vote_req(bool vote)
+{
+	return NULL;
+}
+
+static inline void imp_handle_modem_shutdown(void)
+{
+
+}
+
+#endif /* CONFIG_IPA3_MHI_PROXY */
+
+#endif /* __IMP_H_ */

+ 3286 - 0
ipa/ipa_v3/ipa_mpm.c

@@ -0,0 +1,3286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mhi.h>
+#include <linux/msm_gsi.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/gfp.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+
+#define IPA_MPM_DRV_NAME "ipa_mpm"
+
+#define IPA_MPM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MPM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_FUNC_ENTRY() \
+	IPA_MPM_DBG("ENTRY\n")
+#define IPA_MPM_FUNC_EXIT() \
+	IPA_MPM_DBG("EXIT\n")
+
+#define IPA_MPM_MAX_MHIP_CHAN 3
+
+#define IPA_MPM_NUM_RING_DESC 6
+#define IPA_MPM_RING_LEN IPA_MPM_NUM_RING_DESC
+
+#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
+#define IPA_MPM_MHI_HOST_DL_CHANNEL  5
+#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
+#define TETH_AGGR_BYTE_LIMIT 24
+#define TETH_AGGR_DL_BYTE_LIMIT 16
+#define TRE_BUFF_SIZE 32768
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+#define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
+#define IPA_MPM_FLOW_CTRL_ADD 1
+#define IPA_MPM_FLOW_CTRL_DELETE 0
+
+enum mhip_re_type {
+	MHIP_RE_XFER = 0x2,
+	MHIP_RE_NOP = 0x4,
+};
+
+enum ipa_mpm_mhi_ch_id_type {
+	IPA_MPM_MHIP_CH_ID_0,
+	IPA_MPM_MHIP_CH_ID_1,
+	IPA_MPM_MHIP_CH_ID_2,
+	IPA_MPM_MHIP_CH_ID_MAX,
+};
+
+enum ipa_mpm_dma_data_direction {
+	DMA_HIPA_BIDIRECTIONAL = 0,
+	DMA_TO_HIPA = 1,
+	DMA_FROM_HIPA = 2,
+	DMA_HIPA_NONE = 3,
+};
+
+enum ipa_mpm_ipa_teth_client_type {
+	IPA_MPM_MHIP_USB,
+	IPA_MPM_MHIP_WIFI,
+};
+
+enum ipa_mpm_mhip_client_type {
+	IPA_MPM_MHIP_INIT,
+	/* USB RMNET CLIENT */
+	IPA_MPM_MHIP_USB_RMNET,
+	/* USB RNDIS / WIFI CLIENT */
+	IPA_MPM_MHIP_TETH,
+	/* USB DPL CLIENT */
+	IPA_MPM_MHIP_USB_DPL,
+	IPA_MPM_MHIP_NONE,
+};
+
+enum ipa_mpm_clk_vote_type {
+	CLK_ON,
+	CLK_OFF,
+};
+
+enum mhip_status_type {
+	MHIP_STATUS_SUCCESS,
+	MHIP_STATUS_NO_OP,
+	MHIP_STATUS_FAIL,
+	MHIP_STATUS_BAD_STATE,
+	MHIP_STATUS_EP_NOT_FOUND,
+	MHIP_STATUS_EP_NOT_READY,
+};
+
+enum mhip_smmu_domain_type {
+	MHIP_SMMU_DOMAIN_IPA,
+	MHIP_SMMU_DOMAIN_PCIE,
+	MHIP_SMMU_DOMAIN_NONE,
+};
+
+enum ipa_mpm_start_stop_type {
+	MPM_MHIP_STOP,
+	MPM_MHIP_START,
+};
+/* each pair of UL/DL channels are defined below */
+static const struct mhi_device_id mhi_driver_match_table[] = {
+	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
+	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
+	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
+};
+
+static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
+	__stringify(IPA_MPM_MHIP_TETH),
+	__stringify(IPA_MPM_MHIP_USB_RMNET),
+	__stringify(IPA_MPM_MHIP_USB_DPL),
+};
+/*
+ * MHI PRIME GSI Descriptor format that Host IPA uses.
+ */
+struct __packed mhi_p_desc {
+	uint64_t buffer_ptr;
+	uint16_t buff_len;
+	uint16_t resvd1;
+	uint16_t chain : 1;
+	uint16_t resvd4 : 7;
+	uint16_t ieob : 1;
+	uint16_t ieot : 1;
+	uint16_t bei : 1;
+	uint16_t sct : 1;
+	uint16_t resvd3 : 4;
+	uint8_t re_type;
+	uint8_t resvd2;
+};
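+
+/*
+ * Each descriptor packs into 16 bytes (8-byte buffer pointer, 2-byte length,
+ * 2 reserved bytes, 2 bytes of flag bitfields, re_type and one reserved
+ * byte); IPA_MPM_DESC_SIZE below relies on this layout. A build-time check
+ * could assert it, e.g.:
+ *
+ *	static_assert(sizeof(struct mhi_p_desc) == 16);
+ */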
+
+/*
+ * MHI PRIME Channel Context and Event Context Array
+ * Information that is sent to Device IPA.
+ */
+struct ipa_mpm_channel_context_type {
+	u32 chstate : 8;
+	u32 reserved1 : 24;
+	u32 chtype;
+	u32 erindex;
+	u64 rbase;
+	u64 rlen;
+	u64 reserved2;
+	u64 reserved3;
+} __packed;
+
+struct ipa_mpm_event_context_type {
+	u32 reserved1 : 8;
+	u32 update_rp_modc : 8;
+	u32 update_rp_intmodt : 16;
+	u32 ertype;
+	u32 update_rp_addr;
+	u64 rbase;
+	u64 rlen;
+	u32 buff_size : 16;
+	u32 reserved2 : 16;
+	u32 reserved3;
+	u64 reserved4;
+} __packed;
+
+struct ipa_mpm_pipes_info_type {
+	enum ipa_client_type ipa_client;
+	struct ipa_ep_cfg ep_cfg;
+};
+
+struct ipa_mpm_channel_type {
+	struct ipa_mpm_pipes_info_type dl_cons;
+	struct ipa_mpm_pipes_info_type ul_prod;
+	enum ipa_mpm_mhip_client_type mhip_client;
+};
+
+static struct ipa_mpm_channel_type ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_MAX];
+
+/* For configuring IPA_CLIENT_MHI_PRIME_TETH_CONS */
+static struct ipa_ep_cfg mhip_dl_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_CONS,
+	},
+	.hdr = {
+		.hdr_len = 4,
+		.hdr_ofst_metadata_valid = 1,
+		.hdr_ofst_metadata = 1,
+		.hdr_ofst_pkt_size_valid = 1,
+		.hdr_ofst_pkt_size = 2,
+	},
+	.hdr_ext = {
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_payload_len_inc_padding = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_DEAGGR,
+		.aggr = IPA_QCMAP,
+		.aggr_byte_limit = TETH_AGGR_DL_BYTE_LIMIT,
+		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
+	},
+};
+
+static struct ipa_ep_cfg mhip_ul_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_PROD,
+	},
+	.hdr = {
+		.hdr_len = 4,
+		.hdr_ofst_metadata_valid = 1,
+		.hdr_ofst_metadata = 0,
+		.hdr_ofst_pkt_size_valid = 1,
+		.hdr_ofst_pkt_size = 2,
+	},
+	.hdr_ext = {
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_payload_len_inc_padding = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_AGGR,
+		.aggr = IPA_QCMAP,
+		.aggr_byte_limit = TETH_AGGR_BYTE_LIMIT,
+		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
+	},
+
+};
+
+/* WARNING!! Temporary for rndis integration only */
+
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_PROD */
+static struct ipa_ep_cfg mhip_dl_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_CONS */
+static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_DPL_PROD using USB*/
+static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_DPL_CONS,
+	},
+};
+
+struct ipa_mpm_iova_addr {
+	dma_addr_t base;
+	unsigned int size;
+};
+
+struct ipa_mpm_dev_info {
+	struct platform_device *pdev;
+	struct device *dev;
+	bool ipa_smmu_enabled;
+	bool pcie_smmu_enabled;
+	struct ipa_mpm_iova_addr ctrl;
+	struct ipa_mpm_iova_addr data;
+	u32 chdb_base;
+	u32 erdb_base;
+	bool is_cache_coherent;
+};
+
+struct ipa_mpm_event_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_event_context_type ev_ctx;
+};
+
+struct ipa_mpm_channel_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_channel_context_type ch_ctx;
+};
+
+enum ipa_mpm_gsi_state {
+	GSI_ERR,
+	GSI_INIT,
+	GSI_ALLOCATED,
+	GSI_STARTED,
+	GSI_STOPPED,
+};
+
+enum ipa_mpm_remote_state {
+	MPM_MHIP_REMOTE_STOP,
+	MPM_MHIP_REMOTE_START,
+	MPM_MHIP_REMOTE_ERR,
+};
+
+struct ipa_mpm_channel {
+	struct ipa_mpm_channel_props chan_props;
+	struct ipa_mpm_event_props evt_props;
+	enum ipa_mpm_gsi_state gsi_state;
+	dma_addr_t db_host_iova;
+	dma_addr_t db_device_iova;
+};
+
+enum ipa_mpm_teth_state {
+	IPA_MPM_TETH_INIT = 0,
+	IPA_MPM_TETH_INPROGRESS,
+	IPA_MPM_TETH_CONNECTED,
+};
+
+enum ipa_mpm_mhip_chan {
+	IPA_MPM_MHIP_CHAN_UL,
+	IPA_MPM_MHIP_CHAN_DL,
+	IPA_MPM_MHIP_CHAN_BOTH,
+};
+
+struct ipa_mpm_clk_cnt_type {
+	atomic_t pcie_clk_cnt;
+	atomic_t ipa_clk_cnt;
+};
+
+struct producer_rings {
+	struct mhi_p_desc *tr_va;
+	struct mhi_p_desc *er_va;
+	void *tr_buff_va[IPA_MPM_RING_LEN];
+	dma_addr_t tr_pa;
+	dma_addr_t er_pa;
+	dma_addr_t tr_buff_c_iova[IPA_MPM_RING_LEN];
+	/*
+	 * The iova generated for AP CB,
+	 * used only for dma_map_single to flush the cache.
+	 */
+	dma_addr_t ap_iova_er;
+	dma_addr_t ap_iova_tr;
+	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
+};
+
+struct ipa_mpm_mhi_driver {
+	struct mhi_device *mhi_dev;
+	struct producer_rings ul_prod_ring;
+	struct producer_rings dl_prod_ring;
+	struct ipa_mpm_channel ul_prod;
+	struct ipa_mpm_channel dl_cons;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum ipa_mpm_teth_state teth_state;
+	bool init_complete;
+	/* General MPM mutex to protect concurrent update of MPM GSI states */
+	struct mutex mutex;
+	/*
+	 * Mutex to protect mhi_dev update/access, for concurrency such as
+	 * 5G SSR and USB disconnect/connect.
+	 */
+	struct mutex mhi_mutex;
+	bool in_lpm;
+	struct ipa_mpm_clk_cnt_type clk_cnt;
+	enum ipa_mpm_remote_state remote_state;
+};
+
+struct ipa_mpm_context {
+	struct ipa_mpm_dev_info dev_info;
+	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
+	struct mutex mutex;
+	atomic_t probe_cnt;
+	atomic_t pcie_clk_total_cnt;
+	atomic_t ipa_clk_total_cnt;
+	atomic_t flow_ctrl_mask;
+	atomic_t adpl_over_usb_available;
+	struct device *parent_pdev;
+	struct ipa_smmu_cb_ctx carved_smmu_cb;
+	struct device *mhi_parent_dev;
+};
+
+#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
+#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
+/* WA: Bump IPA_MPM_PAGE_SIZE from 16k (the next power of two above the
+ * ring size) to 32k. This makes sure the IOMMU mapping happens with the
+ * same size for all TR/ER rings and doorbells.
+ */
+#define IPA_MPM_PAGE_SIZE TRE_BUFF_SIZE
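+
+/*
+ * Sizing note: with IPA_MPM_DESC_SIZE == 16 bytes, IPA_MPM_RING_TOTAL_SIZE
+ * is IPA_MPM_RING_LEN * 16, which must fit within one page (enforced in
+ * ipa_mpm_connect_mhip_gsi_pipe() below).
+ */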
+
+static struct ipa_mpm_context *ipa_mpm_ctx;
+static struct platform_device *m_pdev;
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *,
+	const struct mhi_device_id *);
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *);
+static void ipa_mpm_mhi_status_cb(struct mhi_device *, enum MHI_CB);
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state ip_state);
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_mhip_chan mhip_chan,
+	enum ipa_mpm_gsi_state next_state);
+static int ipa_mpm_probe(struct platform_device *pdev);
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id, bool is_force, bool *is_acted);
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id);
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop);
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl);
+
+static struct mhi_driver mhi_driver = {
+	.id_table = mhi_driver_match_table,
+	.probe = ipa_mpm_mhi_probe_cb,
+	.remove = ipa_mpm_mhi_remove_cb,
+	.status_cb = ipa_mpm_mhi_status_cb,
+	.driver = {
+		.name = IPA_MPM_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static void ipa_mpm_ipa3_delayed_probe(struct work_struct *work)
+{
+	(void)ipa_mpm_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa_mpm_ipa3_scheduled_probe, ipa_mpm_ipa3_delayed_probe);
+
+static void ipa_mpm_ipa3_ready_cb(void *user_data)
+{
+	struct platform_device *pdev = (struct platform_device *)(user_data);
+
+	m_pdev = pdev;
+
+	IPA_MPM_DBG("IPA ready callback has been triggered\n");
+
+	schedule_work(&ipa_mpm_ipa3_scheduled_probe);
+}
+
+static void ipa_mpm_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI EVT RING ERROR, not expected..\n");
+	ipa_assert();
+}
+
+static void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI CHAN ERROR, not expected..\n");
+	ipa_assert();
+}
+
+static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe, bool reset)
+{
+	int result = 0;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+
+	IPA_MPM_FUNC_ENTRY();
+	IPA_MPM_DBG("DMA from %d to %d reset=%d\n", src_pipe, dst_pipe, reset);
+
+	/* Reset to basic if reset = 1, otherwise set to DMA */
+	if (reset)
+		ep_cfg.mode.mode = IPA_BASIC;
+	else
+		ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = dst_pipe;
+	ep_cfg.seq.set_dynamic = true;
+
+	result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
+	IPA_MPM_FUNC_EXIT();
+
+	return result;
+}
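+
+/*
+ * Usage sketch (illustrative; the client names are an assumption, not a
+ * call made here): switching the USB producer into DMA mode towards an
+ * MHIP consumer would look like
+ *
+ *	ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+ *		IPA_CLIENT_MHI_PRIME_RMNET_CONS, false);
+ *
+ * and calling it again with reset == true restores IPA_BASIC mode.
+ */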
+
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl)
+{
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.en = IPA_HOLB_TMR_EN;
+	/* 31 ms timer, which is less than tag timeout */
+	holb_cfg.tmr_val = IPA_MHIP_HOLB_TMO;
+	return ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+}
+
+/**
+ * ipa_mpm_smmu_map() - SMMU maps the ring and the buffer pointer.
+ * @va_addr: virtual address that needs to be mapped
+ * @sz: size of the address to be mapped
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for AP context bank
+ *
+ * This function SMMU-maps both the ring and the buffer pointer.
+ * The ring pointers are aligned to the ring size and
+ * the buffer pointers to the buffer size.
+ *
+ * Returns: iova of the mapped address
+ */
+static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
+	int sz,
+	int dir,
+	dma_addr_t *ap_cb_iova)
+{
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	phys_addr_t phys_addr;
+	dma_addr_t iova;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
+	int ret = 0;
+
+	/* check cache coherent */
+	if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
+		IPA_MPM_DBG_LOW("enable cache coherent\n");
+		prot |= IOMMU_CACHE;
+	}
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
+		ipa_assert();
+	}
+	/*
+	 * Both the Host IPA and PCIe SMMUs should be enabled or disabled
+	 * to proceed.
+	 * If SMMU enabled => iova == IOMMU-mapped iova
+	 * If SMMU disabled => iova == pa
+	 * dma_map_single() ensures the cache is flushed and the memory is
+	 * not touched again until dma_unmap_single() is called.
+	 */
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		/* Map the phys addr to both PCIE and IPA AP CB
+		 * from the carved out common iova range.
+		 */
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->mhi_parent_dev) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		phys_addr = virt_to_phys((void *) va_addr);
+
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
+					iova_p, pa_p, size_p);
+
+		/* Flush the cache with dma_map_single for the IPA AP CB */
+		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
+					size_p, dir);
+
+		if (dma_mapping_error(ipa3_ctx->pdev, *ap_cb_iova)) {
+			IPA_MPM_ERR("dma_map_single failure for entry\n");
+			goto fail_dma_mapping;
+		}
+
+		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
+					pa_p, size_p, prot);
+		if (ret) {
+			IPA_MPM_ERR("IPA IOMMU returned failure, ret = %d\n",
+					ret);
+			ipa_assert();
+		}
+
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->mhi_parent_dev);
+		if (!pcie_smmu_domain) {
+			IPA_MPM_ERR("invalid pcie smmu domain\n");
+			ipa_assert();
+		}
+		ret = iommu_map(pcie_smmu_domain, iova_p, pa_p, size_p, prot);
+
+		if (ret) {
+			IPA_MPM_ERR("PCIe IOMMU returned failure, ret = %d\n",
+				ret);
+			ipa_assert();
+		}
+
+		cb->next_addr = iova_p + size_p;
+		iova = iova_p;
+	} else {
+		iova = dma_map_single(ipa3_ctx->pdev, va_addr,
+					IPA_MPM_RING_TOTAL_SIZE, dir);
+
+		if (dma_mapping_error(ipa3_ctx->pdev, iova)) {
+			IPA_MPM_ERR("dma_map_single failure for entry\n");
+			goto fail_dma_mapping;
+		}
+
+		*ap_cb_iova = iova;
+	}
+	return iova;
+
+fail_dma_mapping:
+	iova = 0;
+	ipa_assert();
+	return iova;
+}
+
+/**
+ * ipa_mpm_smmu_unmap() - SMMU unmaps the ring and the buffer pointer.
+ * @carved_iova: carved iova that needs to be unmapped
+ * @sz: size of the mapping
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for the AP context bank
+ *
+ * This function SMMU-unmaps both the ring and the buffer pointer.
+ * The ring pointers are aligned to the ring size and
+ * the buffer pointers to the buffer size.
+ *
+ * Return: none
+ */
+static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
+	dma_addr_t ap_cb_iova)
+{
+	unsigned long iova_p;
+	unsigned long pa_p;
+	u32 size_p = 0;
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	int smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (!carved_iova) {
+		IPA_MPM_ERR("carved_iova is zero\n");
+		WARN_ON(1);
+		return;
+	}
+
+	if (smmu_enabled) {
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->mhi_parent_dev) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, carved_iova, sz,
+			iova_p, pa_p, size_p);
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->mhi_parent_dev);
+		if (pcie_smmu_domain) {
+			iommu_unmap(pcie_smmu_domain, iova_p, size_p);
+		} else {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+		iommu_unmap(ipa_smmu_domain, iova_p, size_p);
+
+		cb->next_addr -= size_p;
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
+			IPA_MPM_RING_TOTAL_SIZE, dir);
+	} else {
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
+			IPA_MPM_RING_TOTAL_SIZE, dir);
+	}
+}
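+
+/*
+ * Note: the carved-iova allocator above is a simple bump allocator -
+ * cb->next_addr only steps back by the size just unmapped, so unmaps
+ * are assumed to happen in reverse order of the maps.
+ */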
+
+static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
+	u32 pa_addr)
+{
+	/*
+	 * Doorbells are already in PA; map these to the
+	 * PCIe/IPA domain if SMMUs are enabled.
+	 */
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
+	u32 iova = 0;
+	u64 offset = 0;
+
+	/* check cache coherent */
+	if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
+		IPA_MPM_DBG("enable cache coherent\n");
+		prot |= IOMMU_CACHE;
+	}
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %lx\n", carved_iova);
+		ipa_assert();
+	}
+
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, pa_addr, IPA_MPM_PAGE_SIZE,
+					iova_p, pa_p, size_p);
+		if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
+			ipa_smmu_domain = ipa3_get_smmu_domain();
+			if (!ipa_smmu_domain) {
+				IPA_MPM_ERR("invalid IPA smmu domain\n");
+				ipa_assert();
+			}
+			ret = ipa3_iommu_map(ipa_smmu_domain,
+				iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("IPA doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		} else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
+			pcie_smmu_domain = iommu_get_domain_for_dev(
+				ipa_mpm_ctx->mhi_parent_dev);
+			if (!pcie_smmu_domain) {
+				IPA_MPM_ERR("invalid PCIe smmu domain\n");
+				ipa_assert();
+			}
+			ret = iommu_map(pcie_smmu_domain,
+					iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("PCIe doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		}
+		iova = iova_p + offset;
+		cb->next_addr = iova_p + IPA_MPM_PAGE_SIZE;
+	} else {
+		iova = pa_addr;
+	}
+	return iova;
+}
+
+static void ipa_mpm_smmu_unmap_doorbell(enum mhip_smmu_domain_type smmu_domain,
+	dma_addr_t iova)
+{
+	/*
+	 * Doorbells are already in PA; unmap these from the
+	 * PCIe/IPA domain if SMMUs are enabled.
+	 */
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		IPA_SMMU_ROUND_TO_PAGE(iova, iova, IPA_MPM_PAGE_SIZE,
+					iova_p, pa_p, size_p);
+		if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
+			ipa_smmu_domain = ipa3_get_smmu_domain();
+			if (ipa_smmu_domain) {
+				iommu_unmap(ipa_smmu_domain, iova_p, size_p);
+			} else {
+				IPA_MPM_ERR("invalid IPA smmu domain\n");
+				ipa_assert();
+			}
+		} else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
+			pcie_smmu_domain = iommu_get_domain_for_dev(
+				ipa_mpm_ctx->mhi_parent_dev);
+			if (pcie_smmu_domain) {
+				iommu_unmap(pcie_smmu_domain, iova_p, size_p);
+			} else {
+				IPA_MPM_ERR("invalid PCIE smmu domain\n");
+				ipa_assert();
+			}
+			cb->next_addr -= IPA_MPM_PAGE_SIZE;
+		}
+	}
+}
+
+static int get_idx_from_id(const struct mhi_device_id *id)
+{
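+	/* The MHI core passes back a pointer into mhi_driver_match_table,
+	 * so pointer arithmetic recovers the probe index.
+	 */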
+	return (id - mhi_driver_match_table);
+}
+
+static void get_ipa3_client(int id,
+	enum ipa_client_type *ul_prod,
+	enum ipa_client_type *dl_cons)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	if (id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		*ul_prod = IPA_CLIENT_MAX;
+		*dl_cons = IPA_CLIENT_MAX;
+	} else {
+		*ul_prod = ipa_mpm_pipes[id].ul_prod.ipa_client;
+		*dl_cons = ipa_mpm_pipes[id].dl_cons.ipa_client;
+	}
+	IPA_MPM_FUNC_EXIT();
+}
+
+static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
+	int mhi_idx, struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int res;
+	struct mhi_p_desc *er_ring_va, *tr_ring_va;
+	void *buff_va;
+	dma_addr_t er_carved_iova, tr_carved_iova;
+	dma_addr_t ap_cb_tr_iova, ap_cb_er_iova, ap_cb_buff_iova;
+	struct ipa_request_gsi_channel_params gsi_params;
+	int dir;
+	int i, k;
+	int result;
+	struct ipa3_ep_context *ep;
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		goto fail_gen;
+
+	if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
+		(mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
+		goto fail_gen;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		goto fail_gen;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid == 1) {
+		IPA_MPM_ERR("EP %d already allocated.\n", ipa_ep_idx);
+		return 0;
+	}
+
+	IPA_MPM_DBG("connecting client %d (ep: %d)\n", mhip_client, ipa_ep_idx);
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (IPA_MPM_RING_TOTAL_SIZE > PAGE_SIZE) {
+		IPA_MPM_ERR("Ring Size / allocation mismatch\n");
+		ipa_assert();
+	}
+
+	/* Only the rings need alignment, separate from the buffers */
+	er_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
+
+	if (!er_ring_va)
+		goto fail_evt_alloc;
+
+	tr_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
+
+	if (!tr_ring_va)
+		goto fail_tr_alloc;
+
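+	/* Element 0 is kept as a NOP descriptor; usable transfer elements
+	 * start at index 1 (the allocation loop below also starts at 1).
+	 */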
+	tr_ring_va[0].re_type = MHIP_RE_NOP;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	/* allocate transfer ring elements */
+	for (i = 1, k = 1; i < IPA_MPM_RING_LEN; i++, k++) {
+		buff_va = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
+		if (!buff_va)
+			goto fail_buff_alloc;
+
+		tr_ring_va[i].buffer_ptr =
+			ipa_mpm_smmu_map(buff_va, TRE_BUFF_SIZE, dir,
+					&ap_cb_buff_iova);
+
+		if (!tr_ring_va[i].buffer_ptr)
+			goto fail_smmu_map_ring;
+
+		tr_ring_va[i].buff_len = TRE_BUFF_SIZE;
+		tr_ring_va[i].chain = 0;
+		tr_ring_va[i].ieob = 0;
+		tr_ring_va[i].ieot = 0;
+		tr_ring_va[i].bei = 0;
+		tr_ring_va[i].sct = 0;
+		tr_ring_va[i].re_type = MHIP_RE_XFER;
+
+		if (IPA_CLIENT_IS_PROD(mhip_client)) {
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[k] =
+						buff_va;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[k]
+						= tr_ring_va[i].buffer_ptr;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
+						ap_cb_buff_iova;
+		} else {
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[k] =
+						buff_va;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[k]
+						= tr_ring_va[i].buffer_ptr;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
+						ap_cb_buff_iova;
+		}
+	}
+
+	tr_carved_iova = ipa_mpm_smmu_map(tr_ring_va, PAGE_SIZE, dir,
+		&ap_cb_tr_iova);
+	if (!tr_carved_iova)
+		goto fail_smmu_map_ring;
+
+	er_carved_iova = ipa_mpm_smmu_map(er_ring_va, PAGE_SIZE, dir,
+		&ap_cb_er_iova);
+	if (!er_carved_iova)
+		goto fail_smmu_map_ring;
+
+	/* Store Producer channel rings */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* Device UL */
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = er_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = er_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
+			ap_cb_tr_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er =
+			ap_cb_er_iova;
+	} else {
+		/* Host UL */
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = er_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = er_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
+			ap_cb_tr_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er =
+			ap_cb_er_iova;
+	}
+
+	memset(&gsi_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].dl_cons.ep_cfg;
+	else
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].ul_prod.ep_cfg;
+
+	gsi_params.client = mhip_client;
+	gsi_params.skip_ep_cfg = false;
+
+	/*
+	 * RP update address = Device channel DB address
+	 * CLIENT_PROD -> Host DL
+	 * CLIENT_CONS -> Host UL
+	 */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova =
+			gsi_params.evt_ring_params.rp_update_addr;
+
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+	} else {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova =
+			gsi_params.evt_ring_params.rp_update_addr;
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+	}
+
+	/* Fill Event ring params */
+	gsi_params.evt_ring_params.intf = GSI_EVT_CHTYPE_MHIP_EV;
+	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
+	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
+	gsi_params.evt_ring_params.int_modt = 0;
+	gsi_params.evt_ring_params.int_modc = 0;
+	gsi_params.evt_ring_params.intvec = 0;
+	gsi_params.evt_ring_params.msi_addr = 0;
+	gsi_params.evt_ring_params.exclusive = true;
+	gsi_params.evt_ring_params.err_cb = ipa_mpm_gsi_evt_ring_err_cb;
+	gsi_params.evt_ring_params.user_data = NULL;
+
+	/* Evt Scratch Params */
+	/* Disable the Moderation for ringing doorbells */
+	gsi_params.evt_scratch.mhip.rp_mod_threshold = 1;
+	gsi_params.evt_scratch.mhip.rp_mod_timer = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_counter = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_id = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_running = 0;
+	gsi_params.evt_scratch.mhip.fixed_buffer_sz = TRE_BUFF_SIZE;
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.evt_scratch.mhip.rp_mod_threshold = 4;
+
+	/* Channel Params */
+	gsi_params.chan_params.prot = GSI_CHAN_PROT_MHIP;
+	gsi_params.chan_params.dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	/* chan_id is set in ipa3_request_gsi_channel() */
+	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_params.chan_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_CHAN_RE_SIZE_16B;
+	gsi_params.chan_params.ring_base_vaddr = NULL;
+	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_params.chan_params.low_weight = 1;
+	gsi_params.chan_params.xfer_cb = NULL;
+	gsi_params.chan_params.err_cb = ipa_mpm_gsi_chan_err_cb;
+	gsi_params.chan_params.chan_user_data = NULL;
+
+	/* Channel scratch */
+	gsi_params.chan_scratch.mhip.assert_bit_40 = 0;
+	gsi_params.chan_scratch.mhip.host_channel = 1;
+
+	res = ipa3_request_gsi_channel(&gsi_params, out_params);
+	if (res) {
+		IPA_MPM_ERR("failed to allocate GSI channel res=%d\n", res);
+		goto fail_alloc_channel;
+	}
+
+	if (IPA_CLIENT_IS_CONS(mhip_client)) {
+		/*
+		 * Enable HOLB timer one time after bootup/SSR.
+		 * The HOLB timeout drops the packets on MHIP if
+		 * there is a stall on MHIP TX pipe greater than
+		 * configured timeout.
+		 */
+		result = ipa_mpm_start_mhip_holb_tmo(ipa_ep_idx);
+		if (result) {
+			IPA_MPM_ERR("HOLB config failed for %d, fail = %d\n",
+				ipa_ep_idx, result);
+			goto fail_alloc_channel;
+		}
+	}
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		ipa_mpm_change_gsi_state(mhi_idx,
+			IPA_MPM_MHIP_CHAN_DL,
+			GSI_ALLOCATED);
+	else
+		ipa_mpm_change_gsi_state(mhi_idx,
+			IPA_MPM_MHIP_CHAN_UL,
+			GSI_ALLOCATED);
+	result = ipa3_start_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_change_gsi_state(mhi_idx,
+				IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
+		else
+			ipa_mpm_change_gsi_state(mhi_idx,
+				IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
+		goto fail_start_channel;
+	}
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		ipa_mpm_change_gsi_state(mhi_idx,
+			IPA_MPM_MHIP_CHAN_DL, GSI_STARTED);
+	else
+		ipa_mpm_change_gsi_state(mhi_idx,
+			IPA_MPM_MHIP_CHAN_UL, GSI_STARTED);
+
+	/* Fill in the Device Context params */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* This is the DL channel :: Device -> Host */
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+	} else {
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+
+	return 0;
+
+fail_start_channel:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa3_stop_gsi_channel(ipa_ep_idx);
+fail_alloc_channel:
+	ipa3_release_gsi_channel(ipa_ep_idx);
+fail_smmu_map_db:
+fail_smmu_map_ring:
+fail_tr_alloc:
+fail_evt_alloc:
+fail_buff_alloc:
+	ipa_assert();
+fail_gen:
+	return -EFAULT;
+}
+
+static void ipa_mpm_clean_mhip_chan(int mhi_idx,
+	enum ipa_client_type mhip_client)
+{
+	int dir;
+	int i;
+	int ipa_ep_idx;
+	int result;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		return;
+
+	if ((mhi_idx < IPA_MPM_MHIP_CH_ID_0) ||
+		(mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
+		return;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		return;
+	}
+
+	/* For the uplink channels, enable HOLB. */
+	if (IPA_CLIENT_IS_CONS(mhip_client))
+		ipa3_disable_data_path(ipa_ep_idx);
+
+	/* Release channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("Stop channel for MHIP_Client = %d failed\n",
+					mhip_client);
+		goto fail_chan;
+	}
+	result = ipa3_reset_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("Reset channel for MHIP_Client = %d failed\n",
+					mhip_client);
+		goto fail_chan;
+	}
+	result = ipa3_reset_gsi_event_ring(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("Reset ev ring for MHIP_Client = %d failed\n",
+					mhip_client);
+		goto fail_chan;
+	}
+	result = ipa3_release_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("Release tr ring for MHIP_Client = %d failed\n",
+					mhip_client);
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_change_gsi_state(mhi_idx,
+				IPA_MPM_MHIP_CHAN_DL, GSI_ERR);
+		else
+			ipa_mpm_change_gsi_state(mhi_idx,
+				IPA_MPM_MHIP_CHAN_UL, GSI_ERR);
+		goto fail_chan;
+	}
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		ipa_mpm_change_gsi_state(mhi_idx,
+					IPA_MPM_MHIP_CHAN_DL, GSI_INIT);
+	else
+		ipa_mpm_change_gsi_state(mhi_idx,
+					IPA_MPM_MHIP_CHAN_UL, GSI_INIT);
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+
+	/* Unmap Doorbells */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
+			ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova);
+
+		ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova);
+
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.db_host_iova = 0;
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.db_device_iova = 0;
+
+	} else {
+		ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_PCIE,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova);
+
+		ipa_mpm_smmu_unmap_doorbell(MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova);
+
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.db_host_iova = 0;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.db_device_iova = 0;
+	}
+
+	/* deallocate/Unmap transfer ring buffers */
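+	/* Start at index 1: element 0 is the NOP descriptor with no buffer */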
+	for (i = 1; i < IPA_MPM_RING_LEN; i++) {
+		if (IPA_CLIENT_IS_PROD(mhip_client)) {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i]
+								= 0;
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
+								= 0;
+		} else {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+			);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i]
+								= 0;
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+								= 0;
+		}
+	}
+
+	/* deallocate/Unmap rings */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
+			IPA_MPM_PAGE_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
+
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
+			IPA_MPM_PAGE_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
+
+		if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
+		}
+
+		if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
+		}
+
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
+	} else {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
+			IPA_MPM_PAGE_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr);
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
+			IPA_MPM_PAGE_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
+
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = 0;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = 0;
+
+		if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
+		}
+
+		if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
+		}
+
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+	return;
+fail_chan:
+	ipa_assert();
+}
+
+/* round addresses for closest page per SMMU requirements */
+static inline void ipa_mpm_smmu_round_to_page(uint64_t iova, uint64_t pa,
+	uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
+{
+	*iova_p = rounddown(iova, PAGE_SIZE);
+	*pa_p = rounddown(pa, PAGE_SIZE);
+	*size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
+}
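+
+/*
+ * Worked example (4K pages): iova 0x1234, pa 0x5678, size 0x100 yields
+ * iova_p = 0x1000, pa_p = 0x5000 and size_p = 0x1000, i.e. the mapping
+ * is widened to cover the whole page containing the buffer.
+ */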
+
+static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
+	int mhi_idx, int dir)
+{
+	struct mhi_buf ch_config[2];
+	int ret;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ch == NULL) {
+		IPA_MPM_ERR("ch config is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Populate CCA */
+	ch_config[0].buf = &ch->chan_props.ch_ctx;
+	ch_config[0].len = sizeof(ch->chan_props.ch_ctx);
+	ch_config[0].name = "CCA";
+
+	/* populate ECA */
+	ch_config[1].buf = &ch->evt_props.ev_ctx;
+	ch_config[1].len = sizeof(ch->evt_props.ev_ctx);
+	ch_config[1].name = "ECA";
+
+	IPA_MPM_DBG("Configuring MHI PRIME device for mhi_idx %d\n", mhi_idx);
+
+	ret = mhi_device_configure(ipa_mpm_ctx->md[mhi_idx].mhi_dev, dir,
+			ch_config, 2);
+	if (ret) {
+		IPA_MPM_ERR("mhi_device_configure failed\n");
+		return -EINVAL;
+	}
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mpm_mhip_shutdown(int mhip_idx)
+{
+	enum ipa_client_type ul_prod_chan, dl_cons_chan;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
+
+	/* For DPL (CH_ID_2), only the DL channel is used, so skip UL */
+	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2)
+		ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
+
+	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
+
+	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
+		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
+		/* In modem shutdown scenarios such as SSR, no explicit
+		 * PCIe vote is needed.
+		 */
+		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
+	}
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	IPA_MPM_FUNC_EXIT();
+}
+
+/**
+ * ipa_mpm_vote_unvote_pcie_clk() - Vote/unvote the PCIe clock per probe_id.
+ * @vote: CLK_ON to vote, CLK_OFF to unvote
+ * @probe_id: MHI probe_id per client
+ * @is_force: forcibly casts the vote - should be true only in probe
+ * @is_acted: output param - indicates whether the clock was actually
+ *            voted/unvoted; checked only when voting for clocks
+ *
+ * Return: 0 on success or a negative error value on failure.
+ */
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id,
+	bool is_force,
+	bool *is_acted)
+{
+	int result = 0;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("probe_id not found\n");
+		return -EINVAL;
+	}
+
+	if (vote > CLK_OFF) {
+		IPA_MPM_ERR("Invalid vote\n");
+		return -EINVAL;
+	}
+
+	if (!is_acted) {
+		IPA_MPM_ERR("Invalid clk_vote ptr\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		*is_acted = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return 0;
+	}
+
+	if (!ipa_mpm_ctx->md[probe_id].init_complete &&
+		!is_force) {
+		/*
+		 * SSR might be in progress; no need to vote/unvote for
+		 * clocks here as it will be taken care of in remove_cb/
+		 * the subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		*is_acted = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return 0;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
+	IPA_MPM_DBG("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+		vote, probe_id,
+		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
+
+	if (vote == CLK_ON) {
+		result = mhi_device_get_sync(
+			ipa_mpm_ctx->md[probe_id].mhi_dev,
+				MHI_VOTE_BUS | MHI_VOTE_DEVICE);
+		if (result) {
+			IPA_MPM_ERR("mhi_sync_get failed %d for probe_id %d\n",
+				result, probe_id);
+			*is_acted = false;
+			return result;
+		}
+
+		IPA_MPM_DBG("probe_id %d PCIE clock now ON\n", probe_id);
+		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
+		atomic_inc(&ipa_mpm_ctx->pcie_clk_total_cnt);
+	} else {
+		if ((atomic_read(
+			&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
+								== 0)) {
+			IPA_MPM_DBG("probe_id %d PCIE clock already unvoted\n",
+				probe_id);
+			WARN_ON(1);
+			*is_acted = true;
+			return 0;
+		}
+		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev,
+				MHI_VOTE_BUS | MHI_VOTE_DEVICE);
+		IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
+		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
+		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
+	}
+	*is_acted = true;
+	return result;
+}
+
+/*
+ * Turning the IPA clock ON/OFF is done only once - for all clients.
+ */
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id)
+{
+	if (vote > CLK_OFF)
+		return;
+
+	IPA_MPM_DBG("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
+		vote, probe_id,
+		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));
+
+	if (vote == CLK_ON) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
+		IPA_MPM_DBG("IPA clock now ON for probe_id %d\n", probe_id);
+		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
+		atomic_inc(&ipa_mpm_ctx->ipa_clk_total_cnt);
+	} else {
+		if ((atomic_read
+			(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
+								== 0)) {
+			IPA_MPM_DBG("probe_id %d IPA clock already unvoted\n",
+				probe_id);
+			WARN_ON(1);
+			return;
+		}
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
+		IPA_MPM_DBG("probe_id %d IPA clock off\n", probe_id);
+		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
+		atomic_dec(&ipa_mpm_ctx->ipa_clk_total_cnt);
+	}
+}
+
+/**
+ * ipa_mpm_start_stop_remote_mhip_chan() - Start/stop remote device side
+ *                                         MHIP channels.
+ * @probe_id: MHI probe_id per client
+ * @start_stop: whether to start or stop the remote channels
+ * @is_force: forcibly starts/stops the remote channels -
+ *            should be true only in probe
+ *
+ * Return: 0 on success or a negative error value on failure.
+ */
+static int ipa_mpm_start_stop_remote_mhip_chan(
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop,
+	bool is_force)
+{
+	int ret = 0;
+	struct mhi_device *mhi_dev = ipa_mpm_ctx->md[probe_id].mhi_dev;
+
+	/* Sanity check to make sure remote channels can be started.
+	 * If a probe is in progress, mhi_prepare_for_transfer() will start
+	 * the remote channels, so there is no need to start them from here.
+	 */
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (!ipa_mpm_ctx->md[probe_id].init_complete && !is_force) {
+		IPA_MPM_ERR("MHI not initialized yet, probe in progress\n");
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return ret;
+	}
+
+	/* For error state, expect modem SSR to recover from error */
+	if (ipa_mpm_ctx->md[probe_id].remote_state == MPM_MHIP_REMOTE_ERR) {
+		IPA_MPM_ERR("Remote channels in err state for %d\n", probe_id);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return -EFAULT;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
+	if (start_stop == MPM_MHIP_START) {
+		if (ipa_mpm_ctx->md[probe_id].remote_state ==
+				MPM_MHIP_REMOTE_START) {
+			IPA_MPM_DBG("Remote channel already started for %d\n",
+				probe_id);
+		} else {
+			ret = mhi_resume_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			if (ret)
+				ipa_mpm_ctx->md[probe_id].remote_state =
+							MPM_MHIP_REMOTE_ERR;
+			else
+				ipa_mpm_ctx->md[probe_id].remote_state =
+							MPM_MHIP_REMOTE_START;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		}
+	} else {
+		if (ipa_mpm_ctx->md[probe_id].remote_state ==
+				MPM_MHIP_REMOTE_STOP) {
+			IPA_MPM_DBG("Remote channel already stopped for %d\n",
+					probe_id);
+		} else {
+			ret = mhi_pause_transfer(mhi_dev);
+			mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+			if (ret)
+				ipa_mpm_ctx->md[probe_id].remote_state =
+							MPM_MHIP_REMOTE_ERR;
+			else
+				ipa_mpm_ctx->md[probe_id].remote_state =
+							MPM_MHIP_REMOTE_STOP;
+			mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		}
+	}
+	return ret;
+}
+
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	bool is_start;
+	enum ipa_client_type ul_chan, dl_chan;
+	u32 source_pipe_bitmask = 0;
+	enum gsi_status gsi_res = GSI_STATUS_SUCCESS;
+	int result;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_chan > IPA_MPM_MHIP_CHAN_BOTH) {
+		IPA_MPM_ERR("Invalid MHIP channel %d\n", mhip_chan);
+		return MHIP_STATUS_FAIL;
+	}
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id %d\n", probe_id);
+		return MHIP_STATUS_FAIL;
+	}
+
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
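+		/* Note: the UL mapping below is immediately overwritten,
+		 * so only the DL endpoint index is retained for CHAN_BOTH.
+		 */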
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	}
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
+		return MHIP_STATUS_EP_NOT_FOUND;
+	}
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("MHIP probe %d not initialized\n", probe_id);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		return MHIP_STATUS_EP_NOT_READY;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
+			ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state,
+			start_stop);
+		if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state <
+			GSI_ALLOCATED) {
+			IPA_MPM_ERR("GSI chan is not allocated yet\n");
+			return MHIP_STATUS_EP_NOT_READY;
+		}
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+		IPA_MPM_DBG("current GSI state = %d, action = %d\n",
+			ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state,
+			start_stop);
+		if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state <
+			GSI_ALLOCATED) {
+			IPA_MPM_ERR("GSI chan is not allocated yet\n");
+			return MHIP_STATUS_EP_NOT_READY;
+		}
+	}
+
+	is_start = (start_stop == MPM_MHIP_START) ? true : false;
+
+	if (is_start) {
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
+				GSI_STARTED) {
+				IPA_MPM_ERR("GSI chan is already started\n");
+				return MHIP_STATUS_NO_OP;
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
+				GSI_STARTED) {
+				IPA_MPM_ERR("GSI chan is already started\n");
+				return MHIP_STATUS_NO_OP;
+			}
+		}
+		/* Start GSI channel */
+		gsi_res = ipa3_start_gsi_channel(ipa_ep_idx);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPA_MPM_ERR("Error starting channel: err = %d\n",
+					gsi_res);
+			goto gsi_chan_fail;
+		} else {
+			ipa_mpm_change_gsi_state(probe_id, mhip_chan,
+					GSI_STARTED);
+		}
+	} else {
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+			if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state ==
+				GSI_STOPPED) {
+				IPA_MPM_ERR("GSI chan is already stopped\n");
+				return MHIP_STATUS_NO_OP;
+			} else if (ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state
+				!= GSI_STARTED) {
+				IPA_MPM_ERR("GSI chan is not in started state\n");
+				return MHIP_STATUS_NO_OP;
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+			if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state ==
+				GSI_STOPPED) {
+				IPA_MPM_ERR("GSI chan is already stopped\n");
+				return MHIP_STATUS_NO_OP;
+			} else if (ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state
+				!= GSI_STARTED) {
+				IPA_MPM_ERR("GSI chan is not in started state\n");
+				return MHIP_STATUS_NO_OP;
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+			source_pipe_bitmask = 1 <<
+				ipa3_get_ep_mapping(ep->client);
+			/* First Stop UL GSI channel before unvote PCIe clock */
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+
+			if (result) {
+				IPA_MPM_ERR("UL chan stop failed\n");
+				goto gsi_chan_fail;
+			} else {
+				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
+							GSI_STOPPED);
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+			if (result) {
+				IPA_MPM_ERR("Fail to stop DL channel\n");
+				goto gsi_chan_fail;
+			} else {
+				ipa_mpm_change_gsi_state(probe_id, mhip_chan,
+							GSI_STOPPED);
+			}
+		}
+	}
+	IPA_MPM_FUNC_EXIT();
+
+	return MHIP_STATUS_SUCCESS;
+gsi_chan_fail:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa_mpm_change_gsi_state(probe_id, mhip_chan, GSI_ERR);
+	ipa_assert();
+	return MHIP_STATUS_FAIL;
+}
+
+int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	static enum mhip_status_type status;
+	int ret = 0;
+	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
+	bool is_acted = true;
+	const struct ipa_gsi_ep_config *ep_cfg;
+	uint32_t flow_ctrl_mask = 0;
+
+	if (!state)
+		return -EPERM;
+
+	if (!ipa3_is_mhip_offload_enabled())
+		return -EPERM;
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return -EPERM;
+	}
+
+	IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
+
+	if (state->up) {
+		/* Start UL MHIP channel for offloading tethering connection */
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
+			false, &is_acted);
+		if (ret) {
+			IPA_MPM_ERR("Err %d clocking on PCIe clk\n", ret);
+			return ret;
+		}
+
+		/*
+		 * Make sure to start Device side channels before
+		 * starting Host side UL channels. This is to make
+		 * sure device side access host side only after
+		 * Host IPA gets voted.
+		 */
+		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+							MPM_MHIP_START,
+							false);
+		if (ret) {
+			/*
+			 * This can fail only when modem is in SSR state.
+			 * Eventually there would be a remove callback,
+			 * so return a failure.
+			 */
+			IPA_MPM_ERR("MHIP remote chan start fail = %d\n", ret);
+
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+					probe_id,
+					false,
+					&is_acted);
+
+			return ret;
+		}
+		IPA_MPM_DBG("MHIP remote channels are started\n");
+
+		/*
+		 * Update flow control monitoring end point info.
+		 * This info will be used to set delay on the end points upon
+		 * hitting the RED water mark.
+		 */
+		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_WLAN2_PROD);
+
+		if (!ep_cfg)
+			IPA_MPM_ERR("ep = %d not allocated yet\n",
+					IPA_CLIENT_WLAN2_PROD);
+		else
+			flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
+
+		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
+
+		if (!ep_cfg)
+			IPA_MPM_ERR("ep = %d not allocated yet\n",
+					IPA_CLIENT_USB_PROD);
+		else
+			flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
+
+		atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, flow_ctrl_mask);
+
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+						IPA_MPM_FLOW_CTRL_ADD);
+
+		if (ret)
+			IPA_MPM_ERR("Err = %d setting uc flow control\n", ret);
+
+		status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_UL, probe_id, MPM_MHIP_START);
+		switch (status) {
+		case MHIP_STATUS_SUCCESS:
+			ipa_mpm_ctx->md[probe_id].teth_state =
+						IPA_MPM_TETH_CONNECTED;
+			break;
+		case MHIP_STATUS_EP_NOT_READY:
+		case MHIP_STATUS_NO_OP:
+			IPA_MPM_DBG("UL chan already started, status = %d\n",
+					status);
+			if (is_acted) {
+				return ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+						probe_id,
+						false,
+						&is_acted);
+			}
+			break;
+		case MHIP_STATUS_FAIL:
+		case MHIP_STATUS_BAD_STATE:
+		case MHIP_STATUS_EP_NOT_FOUND:
+			IPA_MPM_ERR("UL chan start err = %d\n", status);
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
+			ipa_assert();
+			return -EFAULT;
+		default:
+			IPA_MPM_ERR("Err not found\n");
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
+			ret = -EFAULT;
+			break;
+		}
+		ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
+	} else {
+		/*
+		 * Update flow control monitoring end point info.
+		 * This info will be used to reset delay on the end points.
+		 */
+		flow_ctrl_mask =
+			atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
+
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+						IPA_MPM_FLOW_CTRL_DELETE);
+		flow_ctrl_mask = 0;
+		atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);
+
+		if (ret) {
+			IPA_MPM_ERR("Err = %d resetting uc flow control\n",
+					ret);
+			ipa_assert();
+		}
+		/*
+		 * Make sure to stop Device side channels before
+		 * stopping Host side UL channels. This is to make
+		 * sure device side doesn't access host IPA after
+		 * Host IPA gets devoted.
+		 */
+		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+						MPM_MHIP_STOP,
+						false);
+		if (ret) {
+			/*
+			 * This can fail only when modem is in SSR state.
+			 * Eventually there would be a remove callback,
+			 * so return a failure.
+			 */
+			IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
+			return ret;
+		}
+		IPA_MPM_DBG("MHIP remote channels are stopped\n");
+
+		status = ipa_mpm_start_stop_mhip_chan(
+					IPA_MPM_MHIP_CHAN_UL, probe_id,
+					MPM_MHIP_STOP);
+		switch (status) {
+		case MHIP_STATUS_SUCCESS:
+			ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+			break;
+		case MHIP_STATUS_NO_OP:
+		case MHIP_STATUS_EP_NOT_READY:
+			IPA_MPM_DBG("UL chan already stopped, status = %d\n",
+					status);
+			break;
+		case MHIP_STATUS_FAIL:
+		case MHIP_STATUS_BAD_STATE:
+		case MHIP_STATUS_EP_NOT_FOUND:
+			IPA_MPM_ERR("UL chan can't be stopped, err = %d\n",
+				status);
+			ipa_assert();
+			return -EFAULT;
+		default:
+			IPA_MPM_ERR("Err not found\n");
+			return -EFAULT;
+		}
+		/* Stop UL MHIP channel for offloading tethering connection */
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+					false, &is_acted);
+
+		if (ret) {
+			IPA_MPM_ERR("Error clocking off PCIe clk, err = %d\n",
+				ret);
+			return ret;
+		}
+		ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+	}
+	return ret;
+}
+
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_mhip_chan mhip_chan,
+	enum ipa_mpm_gsi_state next_state)
+{
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX)
+		return;
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
+		ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state = next_state;
+		IPA_MPM_DBG("GSI next_state = %d\n",
+			ipa_mpm_ctx->md[probe_id].ul_prod.gsi_state);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
+	}
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
+		ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state = next_state;
+		IPA_MPM_DBG("GSI next_state = %d\n",
+			ipa_mpm_ctx->md[probe_id].dl_cons.gsi_state);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
+	}
+}
+
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state next_state)
+{
+	enum ipa_mpm_teth_state curr_state;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return;
+	}
+
+	curr_state = ipa_mpm_ctx->md[probe_id].teth_state;
+
+	IPA_MPM_DBG("curr_state = %d, next_state = %d\n",
+		curr_state, next_state);
+
+	switch (curr_state) {
+	case IPA_MPM_TETH_INIT:
+		if (next_state == IPA_MPM_TETH_CONNECTED)
+			next_state = IPA_MPM_TETH_INPROGRESS;
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+		break;
+	case IPA_MPM_TETH_CONNECTED:
+		break;
+	default:
+		IPA_MPM_ERR("No change in state\n");
+		break;
+	}
+
+	ipa_mpm_ctx->md[probe_id].teth_state = next_state;
+	IPA_MPM_DBG("next_state = %d\n", next_state);
+}
+
+static void ipa_mpm_read_channel(enum ipa_client_type chan)
+{
+	struct gsi_chan_info chan_info;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	int res;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(chan);
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("failed to get idx\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MPM_DBG("Reading chan %d, ep_idx = %d, gsi_chan_hdl = %lu\n",
+		chan, ipa_ep_idx, ep->gsi_chan_hdl);
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res)
+		IPA_MPM_ERR("Reading of channel failed for ep_idx %d\n",
+			ipa_ep_idx);
+}
+
+/* ipa_mpm_mhi_probe_cb() is invoked once per MHI' channel pair listed in
+ * mhi_driver_match_table above.
+ */
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
+	const struct mhi_device_id *mhi_id)
+{
+	struct ipa_mpm_channel *ch;
+	int ret;
+	enum ipa_client_type ul_prod, dl_cons;
+	int probe_id;
+	struct ipa_req_chan_out_params ul_out_params, dl_out_params;
+	void __iomem  *db_addr;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
+	u32 wp_addr;
+	int pipe_idx;
+	bool is_acted = true;
+	uint64_t flow_ctrl_mask = 0;
+	bool add_delete = false;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("ipa_mpm_ctx is NULL, not expected; returning\n");
+		return -ENOMEM;
+	}
+
+	probe_id = get_idx_from_id(mhi_id);
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("chan=%pK is not supported for now\n", mhi_id);
+		return -EPERM;
+	}
+
+	if (ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("Probe initialization already done, returning\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);
+
+	get_ipa3_client(probe_id, &ul_prod, &dl_cons);
+
+	/* Vote for the IPA clock for the first time in the init sequence.
+	 * The IPA clock will be unvoted when MHI enters LPM.
+	 * The PCIe clock will be voted/unvoted with every channel probe
+	 * we receive.
+	 * ul_prod = Host -> Device
+	 * dl_cons = Device -> Host
+	 */
+	ipa_mpm_ctx->md[probe_id].mhi_dev = mhi_dev;
+	ipa_mpm_ctx->mhi_parent_dev =
+		ipa_mpm_ctx->md[probe_id].mhi_dev->dev.parent;
+
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_STOP;
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id, true, &is_acted);
+	if (ret) {
+		IPA_MPM_ERR("Err %d voting PCIe clocks\n", ret);
+		return -EPERM;
+	}
+
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
+	ipa_mpm_ctx->md[probe_id].in_lpm = false;
+	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
+
+	/*
+	 * Set up MHI' pipes for the Device IPA, filling in the
+	 * Channel Context and Event Context.
+	 * These params will be sent to the Device side.
+	 * UL CHAN = HOST -> Device
+	 * DL CHAN = Device -> HOST
+	 * Per channel, a TR and an EV ring are allocated.
+	 * For a UL channel -
+	 * IPA HOST PROD TR -> IPA DEVICE CONS EV
+	 * IPA HOST PROD EV -> IPA DEVICE CONS TR
+	 * For a DL channel -
+	 * IPA Device PROD TR -> IPA HOST CONS EV
+	 * IPA Device PROD EV -> IPA HOST CONS TR
+	 */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		/* store UL properties */
+		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+		/* Store Channel properties */
+		ch->chan_props.id = mhi_dev->ul_chan_id;
+		ch->chan_props.device_db =
+			ipa_mpm_ctx->dev_info.chdb_base +
+			ch->chan_props.id * 8;
+		/* Fill Channel Context to be sent to the Device side */
+		ch->chan_props.ch_ctx.chtype =
+			IPA_MPM_MHI_HOST_UL_CHANNEL;
+		ch->chan_props.ch_ctx.erindex =
+			mhi_dev->ul_event_id;
+		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		/* Store Event properties */
+		ch->evt_props.ev_ctx.update_rp_modc = 0;
+		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+		ch->evt_props.ev_ctx.ertype = 1;
+		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+		ch->evt_props.device_db =
+			ipa_mpm_ctx->dev_info.erdb_base +
+			ch->chan_props.ch_ctx.erindex * 8;
+
+		/* connect Host GSI pipes with MHI' protocol */
+		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
+			probe_id, &ul_out_params);
+		if (ret) {
+			IPA_MPM_ERR("failed connecting MPM client %d\n",
+					ul_prod);
+			goto fail_gsi_setup;
+		}
+
+		ch->evt_props.ev_ctx.update_rp_addr =
+			ipa_mpm_smmu_map_doorbell(
+				MHIP_SMMU_DOMAIN_PCIE,
+				ul_out_params.db_reg_phs_addr_lsb);
+		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+			ipa_assert();
+
+		ipa_mpm_ctx->md[probe_id].ul_prod.db_device_iova =
+			ch->evt_props.ev_ctx.update_rp_addr;
+
+		ret = __ipa_mpm_configure_mhi_device(
+				ch, probe_id, DMA_TO_HIPA);
+		if (ret) {
+			IPA_MPM_ERR("configure_mhi_dev fail %d\n",
+					ret);
+			goto fail_smmu;
+		}
+	}
+
+	if (dl_cons != IPA_CLIENT_MAX) {
+		/* store DL channel properties */
+		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
+		/* Store Channel properties */
+		ch->chan_props.id = mhi_dev->dl_chan_id;
+		ch->chan_props.device_db =
+			ipa_mpm_ctx->dev_info.chdb_base +
+			ch->chan_props.id * 8;
+		/* Fill Channel Context to be sent to the Device side */
+		ch->chan_props.ch_ctx.chstate = 1;
+		ch->chan_props.ch_ctx.chtype =
+			IPA_MPM_MHI_HOST_DL_CHANNEL;
+		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
+		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		/* Store Event properties */
+		ch->evt_props.ev_ctx.update_rp_modc = 0;
+		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+		ch->evt_props.ev_ctx.ertype = 1;
+		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+		ch->evt_props.device_db =
+			ipa_mpm_ctx->dev_info.erdb_base +
+			ch->chan_props.ch_ctx.erindex * 8;
+
+		/* connect Host GSI pipes with MHI' protocol */
+		ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons,
+			probe_id, &dl_out_params);
+		if (ret) {
+			IPA_MPM_ERR("connecting MPM client = %d failed\n",
+				dl_cons);
+			goto fail_gsi_setup;
+		}
+
+		ch->evt_props.ev_ctx.update_rp_addr =
+			ipa_mpm_smmu_map_doorbell(
+					MHIP_SMMU_DOMAIN_PCIE,
+					dl_out_params.db_reg_phs_addr_lsb);
+
+		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+			ipa_assert();
+
+		ipa_mpm_ctx->md[probe_id].dl_cons.db_device_iova =
+			ch->evt_props.ev_ctx.update_rp_addr;
+
+		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
+					DMA_FROM_HIPA);
+		if (ret) {
+			IPA_MPM_ERR("mpm_config_mhi_dev failed %d\n", ret);
+			goto fail_smmu;
+		}
+	}
+
+	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
+	if (ret) {
+		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
+		WARN_ON(1);
+		/*
+		 * WA to handle prepare_for_tx failures.
+		 * Although prepare for transfer failed, indicate success
+		 * to the MHI driver. remove_cb will be called eventually
+		 * when the Device side comes up again, and the pending
+		 * cleanup will happen there.
+		 */
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		atomic_inc(&ipa_mpm_ctx->probe_cnt);
+		ipa_mpm_ctx->md[probe_id].init_complete = false;
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+		IPA_MPM_FUNC_EXIT();
+		return 0;
+	}
+
+	/* mhi_prepare_for_transfer translates to starting remote channels */
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_START;
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	/*
+	 * Ring initial channel db - Host Side UL and Device side DL channel.
+	 * To ring doorbell, write "WP" into doorbell register.
+	 * This WP should be set to 1 element less than ring max.
+	 */
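+	/*
+	 * For example, with 16 B transfer-ring elements, the initial WP
+	 * written below is tr_pa + (IPA_MPM_RING_LEN - 1) * 16, i.e. the
+	 * physical address of the last element in the transfer ring.
+	 */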
+
+	/* Ring UL PRODUCER TRANSFER RING (HOST IPA -> DEVICE IPA) Doorbell */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		IPA_MPM_DBG("Host UL TR PA DB = 0X%0x\n",
+			ul_out_params.db_reg_phs_addr_lsb);
+
+		db_addr = ioremap(
+			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+
+		IPA_MPM_DBG("Host UL TR  DB = 0X%pK, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iounmap(db_addr);
+		ipa_mpm_read_channel(ul_prod);
+
+		/* Ring UL PRODUCER EVENT RING (HOST IPA -> DEVICE IPA) Doorbell
+		 * Ring the event DB to a value outside the
+		 * ring range such that rp and wp never meet.
+		 */
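+		/*
+		 * Hence (IPA_MPM_RING_LEN + 1) below: one element past the
+		 * end of the ring, so HW never sees rp == wp.
+		 */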
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		IPA_MPM_DBG("for ep_idx %d, gsi_evt_ring_hdl = %ld\n",
+			ipa_ep_idx, ep->gsi_evt_ring_hdl);
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+
+		IPA_MPM_DBG("Host UL ER PA DB = 0X%0x\n",
+			evt_ring_db_addr_low);
+
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		IPA_MPM_DBG("Host UL ER  DB = 0X%pK, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+
+		/* Ring DEVICE IPA DL CONSUMER Event Doorbell */
+		db_addr = ioremap((phys_addr_t)
+			(ipa_mpm_ctx->md[probe_id].ul_prod.evt_props.device_db),
+			4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DL PRODUCER (DEVICE IPA -> HOST IPA) Doorbell */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		db_addr = ioremap((phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.chan_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		IPA_MPM_DBG("Device DL TR  DB = 0X%pK, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+
+		iounmap(db_addr);
+
+		/*
+		 * Ring event ring DB on Device side.
+		 * ipa_mpm should ring the event DB to a value outside the
+		 * ring range such that rp and wp never meet.
+		 */
+		db_addr =
+		ioremap(
+		(phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.evt_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Device DL ER DB = 0X%pK, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+
+		/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
+
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+		IPA_MPM_DBG("Host DL ER PA DB = 0X%0x\n",
+				evt_ring_db_addr_low);
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Host  DL ER  DB = 0X%pK, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+	}
+
+	/* Check if TETH connection is in progress.
+	 * If teth isn't started by now, then Stop UL channel.
+	 */
+	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
+	case IPA_MPM_TETH_INIT:
+		/*
+		 * Make sure to stop Device side channels before
+		 * stopping Host side UL channels. This is to make
+		 * sure Device side doesn't access host side IPA if
+		 * Host IPA gets unvoted.
+		 */
+		ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+						MPM_MHIP_STOP, true);
+		if (ret) {
+			/*
+			 * This can fail only when modem is in SSR.
+			 * Eventually there would be a remove callback,
+			 * so return a failure.
+			 */
+			IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
+			return ret;
+		}
+		if (ul_prod != IPA_CLIENT_MAX) {
+			/* No teth started yet, disable UL channel */
+			ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPA_MPM_ERR("fail to alloc EP.\n");
+				goto fail_stop_channel;
+			}
+			ret = ipa3_stop_gsi_channel(ipa_ep_idx);
+			if (ret) {
+				IPA_MPM_ERR("MHIP Stop channel err = %d\n",
+					ret);
+				goto fail_stop_channel;
+			}
+			ipa_mpm_change_gsi_state(probe_id,
+				IPA_MPM_MHIP_CHAN_UL,
+				GSI_STOPPED);
+		}
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+							true, &is_acted);
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+	case IPA_MPM_TETH_CONNECTED:
+		IPA_MPM_DBG("UL channel is already started, continue\n");
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+
+		/* Lift the delay for rmnet USB prod pipe */
+		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
+			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+			ipa3_xdci_ep_delay_rm(pipe_idx);
+		}
+		break;
+	default:
+		IPA_MPM_DBG("No op for UL channel, in teth state = %d",
+			ipa_mpm_ctx->md[probe_id].teth_state);
+		break;
+	}
+
+	atomic_inc(&ipa_mpm_ctx->probe_cnt);
+	/* Check if ODL/USB DPL pipe is connected before probe */
+	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
+		if (ipa3_is_odl_connected())
+			ret = ipa_mpm_set_dma_mode(
+				IPA_CLIENT_MHI_PRIME_DPL_PROD,
+				IPA_CLIENT_ODL_DPL_CONS, false);
+		else if (atomic_read(&ipa_mpm_ctx->adpl_over_usb_available))
+			ret = ipa_mpm_set_dma_mode(
+				IPA_CLIENT_MHI_PRIME_DPL_PROD,
+				IPA_CLIENT_USB_DPL_CONS, false);
+		if (ret)
+			IPA_MPM_ERR("DPL DMA to ODL/USB failed, ret = %d\n",
+				ret);
+	}
+	mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	ipa_mpm_ctx->md[probe_id].init_complete = true;
+	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+	/* Update Flow control Monitoring, only for the teth UL Prod pipes */
+	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_flow_control;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		ret = ipa3_uc_send_enable_flow_control(ep->gsi_chan_hdl,
+			IPA_MPM_RING_LEN / 4);
+		if (ret) {
+			IPA_MPM_ERR("Err %d flow control enable\n", ret);
+			goto fail_flow_control;
+		}
+		IPA_MPM_DBG("Flow Control enabled for %d", probe_id);
+		flow_ctrl_mask = atomic_read(&ipa_mpm_ctx->flow_ctrl_mask);
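+		/*
+		 * A non-zero mask means at least one client still needs
+		 * flow-control monitoring, so request an add (1);
+		 * otherwise request a delete (0).
+		 */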
+		add_delete = flow_ctrl_mask > 0 ? 1 : 0;
+		ret = ipa3_uc_send_update_flow_control(flow_ctrl_mask,
+							add_delete);
+		if (ret) {
+			IPA_MPM_ERR("Err %d flow control update\n", ret);
+			goto fail_flow_control;
+		}
+		IPA_MPM_DBG("Flow Control updated for %d", probe_id);
+	}
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_gsi_setup:
+fail_start_channel:
+fail_stop_channel:
+fail_smmu:
+fail_flow_control:
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
+		IPA_MPM_DBG("SMMU failed\n");
+	if (is_acted)
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id, true,
+					&is_acted);
+	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
+	ipa_assert();
+	return ret;
+}
+
+static void ipa_mpm_init_mhip_channel_info(void)
+{
+	/* IPA_MPM_MHIP_CH_ID_0 => MHIP TETH PIPES  */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ep_cfg =
+		mhip_dl_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
+		mhip_ul_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
+		IPA_MPM_MHIP_TETH;
+
+	/* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ep_cfg =
+		mhip_dl_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ep_cfg =
+		mhip_ul_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].mhip_client =
+		IPA_MPM_MHIP_USB_RMNET;
+
+	/* IPA_MPM_MHIP_CH_ID_2 => MHIP ADPL PIPE */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_DPL_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ep_cfg =
+		mhip_dl_dpl_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
+		IPA_CLIENT_MAX;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
+		IPA_MPM_MHIP_USB_DPL;
+}
+
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
+{
+	int mhip_idx;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
+			break;
+	}
+	if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_DBG("remove_cb for an mhi_dev that was never probed\n");
+		return;
+	}
+
+	IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
+
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
+	if (mhip_idx == IPA_MPM_MHIP_CH_ID_0)
+		ipa3_uc_send_disable_flow_control();
+
+	ipa_mpm_mhip_shutdown(mhip_idx);
+
+	atomic_dec(&ipa_mpm_ctx->probe_cnt);
+
+	if (atomic_read(&ipa_mpm_ctx->probe_cnt) == 0) {
+		/* Last probe done, reset Everything here */
+		ipa_mpm_ctx->mhi_parent_dev = NULL;
+		ipa_mpm_ctx->carved_smmu_cb.next_addr =
+			ipa_mpm_ctx->carved_smmu_cb.va_start;
+		atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
+		for (mhip_idx = 0;
+			mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+			atomic_set(
+				&ipa_mpm_ctx->md[mhip_idx].clk_cnt.pcie_clk_cnt,
+				0);
+		}
+	}
+
+	IPA_MPM_FUNC_EXIT();
+}
+
+static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
+				enum MHI_CB mhi_cb)
+{
+	int mhip_idx;
+	enum mhip_status_type status;
+
+	IPA_MPM_DBG("%d\n", mhi_cb);
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
+			break;
+	}
+	if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_DBG("ignoring secondary callbacks\n");
+		return;
+	}
+
+	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+	if (!ipa_mpm_ctx->md[mhip_idx].init_complete) {
+		/*
+		 * SSR might be in progress; don't have to vote/unvote for
+		 * IPA clocks as it will be taken care of in
+		 * remove_cb/subsequent probe.
+		 */
+		IPA_MPM_DBG("SSR in progress, return\n");
+		mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+		return;
+	}
+	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
+	switch (mhi_cb) {
+	case MHI_CB_IDLE:
+		break;
+	case MHI_CB_LPM_ENTER:
+		if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
+			status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_DL,
+				mhip_idx, MPM_MHIP_STOP);
+			IPA_MPM_DBG("status = %d\n", status);
+			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
+			ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
+		} else {
+			IPA_MPM_DBG("Already in lpm\n");
+		}
+		break;
+	case MHI_CB_LPM_EXIT:
+		if (ipa_mpm_ctx->md[mhip_idx].in_lpm) {
+			ipa_mpm_vote_unvote_ipa_clk(CLK_ON, mhip_idx);
+			status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_DL,
+				mhip_idx, MPM_MHIP_START);
+			IPA_MPM_DBG("status = %d\n", status);
+			ipa_mpm_ctx->md[mhip_idx].in_lpm = false;
+		} else {
+			IPA_MPM_DBG("Already out of lpm\n");
+		}
+		break;
+	case MHI_CB_EE_RDDM:
+	case MHI_CB_PENDING_DATA:
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_FATAL_ERROR:
+	case MHI_CB_EE_MISSION_MODE:
+	case MHI_CB_DTR_SIGNAL:
+		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
+		break;
+	}
+}
+
+static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
+	enum ipa_mpm_mhip_client_type *mhip_client)
+{
+	switch (prot) {
+	case IPA_USB_RNDIS:
+		*mhip_client = IPA_MPM_MHIP_TETH;
+		break;
+	case IPA_USB_RMNET:
+		*mhip_client = IPA_MPM_MHIP_USB_RMNET;
+		break;
+	case IPA_USB_DIAG:
+		*mhip_client = IPA_MPM_MHIP_USB_DPL;
+		break;
+	default:
+		*mhip_client = IPA_MPM_MHIP_NONE;
+		break;
+	}
+	IPA_MPM_DBG("Mapped xdci prot %d -> MHIP prot %d\n", prot,
+		*mhip_client);
+}
+
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int pipe_idx;
+	bool is_acted = true;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
+			xdci_teth_prot, mhip_client, probe_id);
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
+
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id,
+		false, &is_acted);
+	if (ret) {
+		IPA_MPM_ERR("Error clocking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Make sure to start Device side channels before
+	 * starting Host side UL channels. This is to make
+	 * sure the device side accesses host side IPA only after
+	 * Host IPA clocks are voted.
+	 */
+	ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+						MPM_MHIP_START, false);
+	if (ret) {
+		/*
+		 * This can fail only when modem is in SSR state.
+		 * Eventually there would be a remove callback,
+		 * so return a failure. Don't have to unvote PCIe here.
+		 */
+		IPA_MPM_ERR("MHIP remote chan start fail = %d\n",
+				ret);
+		return ret;
+	}
+
+	IPA_MPM_DBG("MHIP remote channel start success\n");
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+		ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS, false);
+		break;
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("connecting DPL prot %d\n", mhip_client);
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+		atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 1);
+		return 0;
+	default:
+		IPA_MPM_DBG("mhip_client = %d not processed\n", mhip_client);
+		if (is_acted) {
+			ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+			if (ret) {
+				IPA_MPM_ERR("Err unvoting PCIe clk, err = %d\n",
+					ret);
+				return ret;
+			}
+		}
+		return 0;
+	}
+
+	if (mhip_client != IPA_MPM_MHIP_USB_DPL)
+		/* Start UL MHIP channel for offloading teth connection */
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+							probe_id,
+							MPM_MHIP_START);
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+
+		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+
+		/* Lift the delay for rmnet USB prod pipe */
+		ipa3_xdci_ep_delay_rm(pipe_idx);
+		if (status == MHIP_STATUS_NO_OP && is_acted) {
+			/* Channels have already been started,
+			 * so we can unvote the PCIe clocks
+			 */
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+		}
+		break;
+	case MHIP_STATUS_EP_NOT_READY:
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan can't be started, err = %d\n", status);
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+		ret = -EFAULT;
+		break;
+	default:
+		if (is_acted)
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+				false, &is_acted);
+		IPA_MPM_ERR("unexpected MHIP status\n");
+		break;
+	}
+	return ret;
+}
+
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int ret = 0;
+	bool is_acted = true;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
+			xdci_teth_prot, mhip_client, probe_id);
+	/*
+	 * Make sure to stop Device side channels before
+	 * stopping Host side UL channels. This is to make
+	 * sure device side doesn't access host side IPA if
+	 * Host IPA gets unvoted.
+	 */
+	ret = ipa_mpm_start_stop_remote_mhip_chan(probe_id,
+						MPM_MHIP_STOP, false);
+	if (ret) {
+		/*
+		 * This can fail only when modem is in SSR state.
+		 * Eventually there would be a remove callback,
+		 * so return a failure.
+		 */
+		IPA_MPM_ERR("MHIP remote chan stop fail = %d\n", ret);
+		return ret;
+	}
+
+	IPA_MPM_DBG("MHIP remote channels are stopped\n");
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_APPS_LAN_CONS, true);
+		if (ret) {
+			IPA_MPM_ERR("failed to reset dma mode\n");
+			return ret;
+		}
+		break;
+	case IPA_MPM_MHIP_TETH:
+		IPA_MPM_DBG("Rndis Disconnect, wait for wan_state ioctl\n");
+		return 0;
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("Teth Disconnecting for DPL\n");
+
+		/* change teth state only if ODL is disconnected */
+		if (!ipa3_is_odl_connected()) {
+			ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+			ipa_mpm_ctx->md[probe_id].mhip_client =
+				IPA_MPM_MHIP_NONE;
+		}
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+			false, &is_acted);
+		if (ret)
+			IPA_MPM_ERR("Error clocking off PCIe clk err %d\n", ret);
+		atomic_set(&ipa_mpm_ctx->adpl_over_usb_available, 0);
+		return ret;
+	default:
+		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
+		return 0;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+		probe_id, MPM_MHIP_STOP);
+
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan can't be stopped, err = %d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+			false, &is_acted);
+		return -EFAULT;
+	default:
+		IPA_MPM_ERR("unexpected MHIP status\n");
+		break;
+	}
+
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
+		false, &is_acted);
+
+	if (ret) {
+		IPA_MPM_ERR("Error clocking off PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+
+	return ret;
+}
+
+static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
+{
+	struct ipa_smmu_in_params smmu_in;
+	struct ipa_smmu_out_params smmu_out;
+	u32 carved_iova_ap_mapping[2];
+	struct ipa_smmu_cb_ctx *cb;
+	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int ret = 0;
+
+	if (ipa_mpm_ctx->carved_smmu_cb.valid) {
+		IPA_MPM_DBG("SMMU Context allocated, returning ..\n");
+		return ret;
+	}
+
+	cb = &ipa_mpm_ctx->carved_smmu_cb;
+
+	/* get IPA SMMU enabled status */
+	smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
+	else
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
+		smmu_out.smmu_enable;
+
+	/* get cache_coherent enable or not */
+	ipa_mpm_ctx->dev_info.is_cache_coherent = ap_cb->is_cache_coherent;
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
+		carved_iova_ap_mapping, 2)) {
+		IPA_MPM_ERR("failed to read of_node %s\n",
+			"qcom,iova-mapping");
+		return -EINVAL;
+	}
+	ipa_mpm_ctx->dev_info.pcie_smmu_enabled = true;
+
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled !=
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
+		IPA_MPM_DBG("PCIE/IPA SMMU config mismatch\n");
+		return -EINVAL;
+	}
+
+	cb->va_start = carved_iova_ap_mapping[0];
+	cb->va_size = carved_iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+
+	if (cb->va_end >= ap_cb->va_start) {
+		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
+				cb->va_start);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	cb->dev = ipa_mpm_ctx->dev_info.dev;
+	cb->valid = true;
+	cb->next_addr = cb->va_start;
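+	/*
+	 * The carved region is handed out by a simple bump allocator:
+	 * next_addr advances with each mapping and is reset to va_start
+	 * once the last MHI device is removed.
+	 */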
+
+	if (dma_set_mask_and_coherent(ipa_mpm_ctx->dev_info.dev,
+		DMA_BIT_MASK(64))) {
+		IPA_MPM_ERR("setting DMA mask to 64 failed.\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int ipa_mpm_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int idx = 0;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx) {
+		IPA_MPM_DBG("MPM is already probed, returning\n");
+		return 0;
+	}
+
+	ret = ipa_register_ipa_ready_cb(ipa_mpm_ipa3_ready_cb, (void *)pdev);
+	/*
+	 * A return of 0 means the callback was registered and probe is
+	 * expected to be re-entered from ipa_mpm_ipa3_ready_cb once IPA
+	 * is ready. If we received -EEXIST, IPA has already initialized,
+	 * so continue the probing process here.
+	 */
+	if (!ret) {
+		IPA_MPM_DBG("IPA not ready yet, registering callback\n");
+		return ret;
+	}
+	IPA_MPM_DBG("IPA is ready, continue with probe\n");
+
+	ipa_mpm_ctx = kzalloc(sizeof(*ipa_mpm_ctx), GFP_KERNEL);
+
+	if (!ipa_mpm_ctx)
+		return -ENOMEM;
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		mutex_init(&ipa_mpm_ctx->md[i].mutex);
+		mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
+	}
+
+	ipa_mpm_ctx->dev_info.pdev = pdev;
+	ipa_mpm_ctx->dev_info.dev = &pdev->dev;
+
+	ipa_mpm_init_mhip_channel_info();
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
+		&ipa_mpm_ctx->dev_info.chdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-chdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("chdb-base=0x%x\n", ipa_mpm_ctx->dev_info.chdb_base);
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
+		&ipa_mpm_ctx->dev_info.erdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-erdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("erdb-base=0x%x\n", ipa_mpm_ctx->dev_info.erdb_base);
+
+	ret = ipa_mpm_populate_smmu_info(pdev);
+
+	if (ret) {
+		IPA_MPM_DBG("SMMU Config failed\n");
+		goto fail_probe;
+	}
+
+	atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->flow_ctrl_mask, 0);
+
+	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
+		ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
+		ipa_mpm_ctx->md[idx].dl_cons.gsi_state = GSI_INIT;
+		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.ipa_clk_cnt, 0);
+		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.pcie_clk_cnt, 0);
+	}
+
+	ret = mhi_driver_register(&mhi_driver);
+	if (ret) {
+		IPA_MPM_ERR("mhi_driver_register failed %d\n", ret);
+		goto fail_probe;
+	}
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_probe:
+	kfree(ipa_mpm_ctx);
+	ipa_mpm_ctx = NULL;
+	return -EFAULT;
+}
+
+static int ipa_mpm_remove(struct platform_device *pdev)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	mhi_driver_unregister(&mhi_driver);
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+}
+
+static const struct of_device_id ipa_mpm_dt_match[] = {
+	{ .compatible = "qcom,ipa-mpm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ipa_mpm_dt_match);
+
+static struct platform_driver ipa_ipa_mpm_driver = {
+	.driver = {
+		.name = "ipa_mpm",
+		.of_match_table = ipa_mpm_dt_match,
+	},
+	.probe = ipa_mpm_probe,
+	.remove = ipa_mpm_remove,
+};
+
+/**
+ * ipa_mpm_init() - Registers ipa_mpm as a platform driver for an APQ
+ *
+ * This function is called after bootup for an APQ device.
+ * ipa_mpm will register itself as a platform driver, and the probe
+ * function will get called.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int __init ipa_mpm_init(void)
+{
+	IPA_MPM_DBG("register ipa_mpm platform device\n");
+	return platform_driver_register(&ipa_ipa_mpm_driver);
+}
+
+static void __exit ipa_mpm_exit(void)
+{
+	IPA_MPM_DBG("unregister ipa_mpm platform device\n");
+	platform_driver_unregister(&ipa_ipa_mpm_driver);
+}
+
+/**
+ * ipa3_is_mhip_offload_enabled() - check if IPA MPM module was initialized
+ * successfully. If it is initialized, MHIP is enabled for teth
+ *
+ * Return value: 1 for yes; 0 for no
+ */
+int ipa3_is_mhip_offload_enabled(void)
+{
+	if (ipa_mpm_ctx == NULL)
+		return 0;
+	else
+		return 1;
+}
+
+int ipa_mpm_panic_handler(char *buf, int size)
+{
+	int i;
+	int cnt = 0;
+
+	cnt = scnprintf(buf, size,
+			"\n---- MHIP Active Clients Table ----\n");
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"Total PCIe active clients count: %d\n",
+			atomic_read(&ipa_mpm_ctx->pcie_clk_total_cnt));
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"Total IPA active clients count: %d\n",
+			atomic_read(&ipa_mpm_ctx->ipa_clk_total_cnt));
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		cnt += scnprintf(buf + cnt, size - cnt,
+			"client id: %d ipa vote cnt: %d pcie vote cnt: %d\n", i,
+			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.ipa_clk_cnt),
+			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.pcie_clk_cnt));
+	}
+	return cnt;
+}
+
+/**
+ * ipa3_get_mhip_gsi_stats() - Query MHIP gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL mhip_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
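+	/*
+	 * Per-channel debug-stats blocks are laid out contiguously in the
+	 * uC MMIO region, IPA3_UC_DEBUG_STATS_OFF bytes apart; each counter
+	 * below is read at its fixed offset within channel i's block.
+	 */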
+	for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_mpm_enable_adpl_over_odl() - Enable or disable ADPL over ODL
+ * @enable:	true for enable, false for disable
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_mpm_enable_adpl_over_odl(bool enable)
+{
+	int ret;
+	bool is_acted = true;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (!ipa3_is_mhip_offload_enabled()) {
+		IPA_MPM_ERR("mpm ctx is NULL\n");
+		return -EPERM;
+	}
+
+	if (enable) {
+		/* inc clk count and set DMA to ODL */
+		IPA_MPM_DBG("mpm enabling ADPL over ODL\n");
+
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
+			IPA_MPM_MHIP_CH_ID_2, false, &is_acted);
+		if (ret) {
+			IPA_MPM_ERR("Err %d clocking on PCIe clk\n", ret);
+			return ret;
+		}
+
+		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
+			IPA_CLIENT_ODL_DPL_CONS, false);
+		if (ret) {
+			IPA_MPM_ERR("MPM failed to set dma mode to ODL\n");
+			if (is_acted)
+				ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+					IPA_MPM_MHIP_CH_ID_2,
+					false,
+					&is_acted);
+			return ret;
+		}
+
+		ipa_mpm_change_teth_state(IPA_MPM_MHIP_CH_ID_2,
+			IPA_MPM_TETH_CONNECTED);
+	} else {
+		/* dec clk count and set DMA to USB */
+		IPA_MPM_DBG("mpm disabling ADPL over ODL\n");
+		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF,
+						IPA_MPM_MHIP_CH_ID_2,
+						false,
+						&is_acted);
+		if (ret) {
+			IPA_MPM_ERR("Err %d clocking off PCIe clk\n",
+				ret);
+			return ret;
+		}
+
+		ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
+			IPA_CLIENT_USB_DPL_CONS, false);
+		if (ret) {
+			IPA_MPM_ERR("MPM failed to set dma mode to USB\n");
+			if (ipa_mpm_vote_unvote_pcie_clk(CLK_ON,
+							IPA_MPM_MHIP_CH_ID_2,
+							false,
+							&is_acted))
+				IPA_MPM_ERR("Err clocking on pcie\n");
+			return ret;
+		}
+
+		/* If USB is not available then reset teth state */
+		if (atomic_read(&ipa_mpm_ctx->adpl_over_usb_available)) {
+			IPA_MPM_DBG("mpm enabling ADPL over USB\n");
+		} else {
+			ipa_mpm_change_teth_state(IPA_MPM_MHIP_CH_ID_2,
+				IPA_MPM_TETH_INIT);
+			IPA_MPM_DBG("USB disconnected. ADPL on standby\n");
+		}
+	}
+
+	IPA_MPM_FUNC_EXIT();
+	return ret;
+}
+
+late_initcall(ipa_mpm_init);
+module_exit(ipa_mpm_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Proxy Manager Driver");

+ 2447 - 0
ipa/ipa_v3/ipa_nat.c

@@ -0,0 +1,2447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/dma-noncoherent.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_nat.h"
+
+
+/*
+ * The following is for adding code (i.e., for EMULATION) not found on x86.
+ */
+#if defined(CONFIG_IPA_EMULATION)
+# include "ipa_emulation_stubs.h"
+#endif
+
+#define IPA_NAT_PHYS_MEM_OFFSET IPA_MEM_PART(nat_tbl_ofst)
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_IPV6CT_PHYS_MEM_OFFSET  0
+#define IPA_IPV6CT_PHYS_MEM_SIZE  IPA_RAM_IPV6CT_SIZE
+
+#define IPA_NAT_IPV6CT_TEMP_MEM_SIZE 128
+
+#define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 4
+#define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 3
+#define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 5
+
+/*
+ * The base table max entries is limited by the 13-bit table index
+ * (2^13 = 8192). Limit the memory size requested by the user to
+ * prevent kernel memory starvation.
+ */
+#define IPA_TABLE_MAX_ENTRIES 8192
+#define MAX_ALLOC_NAT_SIZE(size) (IPA_TABLE_MAX_ENTRIES * (size))
+
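+/* Only a single NAT table (index 0) is supported */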
+#define IPA_VALID_TBL_INDEX(ti) \
+	((ti) == 0)
+
+enum ipa_nat_ipv6ct_table_type {
+	IPA_NAT_BASE_TBL = 0,
+	IPA_NAT_EXPN_TBL = 1,
+	IPA_NAT_INDX_TBL = 2,
+	IPA_NAT_INDEX_EXPN_TBL = 3,
+	IPA_IPV6CT_BASE_TBL = 4,
+	IPA_IPV6CT_EXPN_TBL = 5
+};
+
+static bool sram_compatible;
+
+static vm_fault_t ipa3_nat_ipv6ct_vma_fault_remap(struct vm_fault *vmf)
+{
+	vmf->page = NULL;
+
+	IPADBG("\n");
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static const struct vm_operations_struct ipa3_nat_ipv6ct_remap_vm_ops = {
+	.fault = ipa3_nat_ipv6ct_vma_fault_remap,
+};
+
+
+static inline const char *ipa3_nat_mem_in_as_str(
+	enum ipa3_nat_mem_in nmi)
+{
+	switch (nmi) {
+	case IPA_NAT_MEM_IN_DDR:
+		return "IPA_NAT_MEM_IN_DDR";
+	case IPA_NAT_MEM_IN_SRAM:
+		return "IPA_NAT_MEM_IN_SRAM";
+	default:
+		break;
+	}
+	return "INVALID_MEM_TYPE";
+}
+
+static inline char *ipa_ioc_v4_nat_init_as_str(
+	struct ipa_ioc_v4_nat_init *ptr,
+	char                       *buf,
+	uint32_t                    buf_sz)
+{
+	if (ptr && buf && buf_sz) {
+		snprintf(
+			buf, buf_sz,
+			"V4 NAT INIT: tbl_index(0x%02X) ipv4_rules_offset(0x%08X) expn_rules_offset(0x%08X) index_offset(0x%08X) index_expn_offset(0x%08X) table_entries(0x%04X) expn_table_entries(0x%04X) ip_addr(0x%08X)",
+			ptr->tbl_index,
+			ptr->ipv4_rules_offset,
+			ptr->expn_rules_offset,
+			ptr->index_offset,
+			ptr->index_expn_offset,
+			ptr->table_entries,
+			ptr->expn_table_entries,
+			ptr->ip_addr);
+	}
+	return buf;
+}
+
+static int ipa3_nat_ipv6ct_open(struct inode *inode, struct file *filp)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev;
+
+	IPADBG("\n");
+	dev = container_of(inode->i_cdev,
+		struct ipa3_nat_ipv6ct_common_mem, cdev);
+	filp->private_data = dev;
+	IPADBG("return\n");
+
+	return 0;
+}
+
+static int ipa3_nat_ipv6ct_mmap(
+	struct file *filp,
+	struct vm_area_struct *vma)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev =
+		(struct ipa3_nat_ipv6ct_common_mem *)filp->private_data;
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+
+	struct ipa3_nat_mem          *nm_ptr = (struct ipa3_nat_mem *) dev;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+	enum ipa3_nat_mem_in          nmi;
+
+	int result = 0;
+
+	nmi = nm_ptr->last_alloc_loc;
+
+	IPADBG("In\n");
+
+	if (!IPA_VALID_NAT_MEM_IN(nmi)) {
+		IPAERR_RL("Bad ipa3_nat_mem_in type\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	mld_ptr = &nm_ptr->mem_loc[nmi];
+
+	if (!dev->is_dev_init) {
+		IPAERR("Attempt to mmap %s before dev init\n",
+			   dev->name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	mutex_lock(&dev->lock);
+
+	if (!mld_ptr->vaddr) {
+		IPAERR_RL("Attempt to mmap %s before the memory allocation\n",
+				  dev->name);
+		result = -EPERM;
+		goto unlock;
+	}
+
+	if (mld_ptr->is_mapped) {
+		IPAERR("%s already mapped, only 1 mapping supported\n",
+			   dev->name);
+		result = -EINVAL;
+		goto unlock;
+	}
+
+	if (nmi == IPA_NAT_MEM_IN_SRAM) {
+		if (dev->phys_mem_size == 0 || dev->phys_mem_size > vsize) {
+			IPAERR_RL("%s err vsize(0x%lX) phys_mem_size(0x%X)\n",
+			  dev->name, vsize, dev->phys_mem_size);
+			result = -EINVAL;
+			goto unlock;
+		}
+	}
+
+	/*
+	 * Check if there is no SMMU, or the memory is not DMA coherent
+	 */
+	if (!cb->valid || !dev_is_dma_coherent(cb->dev)) {
+
+		IPADBG("smmu valid=%u, dma non-coherent=%u\n",
+			   cb->valid, !dev_is_dma_coherent(cb->dev));
+
+		vma->vm_page_prot =
+			pgprot_noncached(vma->vm_page_prot);
+	}
+
+	mld_ptr->base_address = NULL;
+
+	IPADBG("Mapping %s\n", ipa3_nat_mem_in_as_str(nmi));
+
+	if (nmi == IPA_NAT_MEM_IN_DDR) {
+
+		IPADBG("map sz=0x%zx into vma size=0x%08lx\n",
+				  mld_ptr->table_alloc_size,
+				  vsize);
+
+		result =
+			dma_mmap_coherent(
+				ipa3_ctx->pdev,
+				vma,
+				mld_ptr->vaddr,
+				mld_ptr->dma_handle,
+				mld_ptr->table_alloc_size);
+
+		if (result) {
+			IPAERR("dma_mmap_coherent failed. Err:%d\n", result);
+			goto unlock;
+		}
+
+		mld_ptr->base_address = mld_ptr->vaddr;
+	} else {
+		if (nmi == IPA_NAT_MEM_IN_SRAM) {
+
+			IPADBG("map phys_mem_size(0x%08X) -> vma sz(0x%08lX)\n",
+				   dev->phys_mem_size, vsize);
+
+			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+			result = vm_iomap_memory(
+				vma, mld_ptr->phys_addr, dev->phys_mem_size);
+
+			if (result) {
+				IPAERR("vm_iomap_memory failed. Err:%d\n",
+					   result);
+				goto unlock;
+			}
+
+			mld_ptr->base_address = mld_ptr->vaddr;
+		}
+	}
+
+	mld_ptr->is_mapped = true;
+
+	vma->vm_ops = &ipa3_nat_ipv6ct_remap_vm_ops;
+
+unlock:
+	mutex_unlock(&dev->lock);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static const struct file_operations ipa3_nat_ipv6ct_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_nat_ipv6ct_open,
+	.mmap = ipa3_nat_ipv6ct_mmap
+};
+
+/**
+ * ipa3_nat_ipv6ct_allocate_tmp_memory() - Allocates the NAT/IPv6CT temp memory
+ */
+static struct ipa3_nat_ipv6ct_tmp_mem *ipa3_nat_ipv6ct_allocate_tmp_memory(void)
+{
+	struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+	IPADBG("\n");
+
+	tmp_mem = kzalloc(sizeof(*tmp_mem), GFP_KERNEL);
+	if (tmp_mem == NULL)
+		return NULL;
+
+	tmp_mem->vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+			&tmp_mem->dma_handle, gfp_flags);
+	if (tmp_mem->vaddr == NULL)
+		goto bail_tmp_mem;
+
+	IPADBG("IPA successfully allocated temp memory\n");
+	return tmp_mem;
+
+bail_tmp_mem:
+	kfree(tmp_mem);
+	return NULL;
+}
+
+static int ipa3_nat_ipv6ct_init_device(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	const char                        *name,
+	u32                                phys_mem_size,
+	u32                                phys_mem_ofst,
+	struct ipa3_nat_ipv6ct_tmp_mem    *tmp_mem)
+{
+	int result = 0;
+
+	IPADBG("In: Init of %s\n", name);
+
+	mutex_init(&dev->lock);
+
+	dev->is_nat_mem    = IS_NAT_MEM_DEV(dev);
+	dev->is_ipv6ct_mem = IS_IPV6CT_MEM_DEV(dev);
+
+	if (strnlen(name, IPA_DEV_NAME_MAX_LEN) == IPA_DEV_NAME_MAX_LEN) {
+		IPAERR("device name is too long\n");
+		result = -ENODEV;
+		goto bail;
+	}
+
+	strlcpy(dev->name, name, IPA_DEV_NAME_MAX_LEN);
+
+	dev->class = class_create(THIS_MODULE, name);
+
+	if (IS_ERR(dev->class)) {
+		IPAERR("unable to create the class for %s\n", name);
+		result = -ENODEV;
+		goto bail;
+	}
+
+	result = alloc_chrdev_region(&dev->dev_num, 0, 1, name);
+
+	if (result) {
+		IPAERR("alloc_chrdev_region err. for %s\n", name);
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	dev->dev = device_create(dev->class, NULL, dev->dev_num, NULL, name);
+
+	if (IS_ERR(dev->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(dev->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&dev->cdev, &ipa3_nat_ipv6ct_fops);
+
+	dev->cdev.owner = THIS_MODULE;
+
+	mutex_lock(&dev->lock);
+
+	result = cdev_add(&dev->cdev, dev->dev_num, 1);
+
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+
+	dev->tmp_mem       = tmp_mem;
+	dev->phys_mem_size = phys_mem_size;
+	dev->phys_mem_ofst = phys_mem_ofst;
+	dev->is_dev_init   = true;
+
+	mutex_unlock(&dev->lock);
+
+	IPADBG("ipa dev %s added successfully. major:%d minor:%d\n", name,
+			  MAJOR(dev->dev_num), MINOR(dev->dev_num));
+
+	result = 0;
+
+	goto bail;
+
+cdev_add_fail:
+	mutex_unlock(&dev->lock);
+	device_destroy(dev->class, dev->dev_num);
+
+device_create_fail:
+	unregister_chrdev_region(dev->dev_num, 1);
+
+alloc_chrdev_region_fail:
+	class_destroy(dev->class);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static void ipa3_nat_ipv6ct_destroy_device(
+	struct ipa3_nat_ipv6ct_common_mem *dev)
+{
+	IPADBG("In\n");
+
+	mutex_lock(&dev->lock);
+
+	if (dev->tmp_mem) {
+		if (ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
+			dma_free_coherent(
+				ipa3_ctx->pdev,
+				IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+				dev->tmp_mem->vaddr,
+				dev->tmp_mem->dma_handle);
+			kfree(dev->tmp_mem);
+			dev->tmp_mem = NULL;
+			ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
+		}
+		dev->tmp_mem = NULL;
+	}
+
+	device_destroy(dev->class, dev->dev_num);
+
+	unregister_chrdev_region(dev->dev_num, 1);
+
+	class_destroy(dev->class);
+
+	dev->is_dev_init = false;
+
+	mutex_unlock(&dev->lock);
+
+	IPADBG("Out\n");
+}
+
+/**
+ * ipa3_nat_ipv6ct_init_devices() - Initialize the NAT and IPv6CT devices
+ *
+ * Called during IPA init to create memory device
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_ipv6ct_init_devices(void)
+{
+	struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+	int result;
+
+	IPADBG("\n");
+
+	/*
+	 * Allocate NAT/IPv6CT temporary memory. The memory is never freed,
+	 * because it is handed to HW whenever a NAT or IPv6CT table is deleted.
+	 */
+	tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();
+
+	if (tmp_mem == NULL) {
+		IPAERR("unable to allocate tmp_mem\n");
+		return -ENOMEM;
+	}
+	ipa3_ctx->nat_mem.is_tmp_mem_allocated = true;
+
+	if (ipa3_nat_ipv6ct_init_device(
+		&ipa3_ctx->nat_mem.dev,
+		IPA_NAT_DEV_NAME,
+		IPA_NAT_PHYS_MEM_SIZE,
+		IPA_NAT_PHYS_MEM_OFFSET,
+		tmp_mem)) {
+		IPAERR("unable to create nat device\n");
+		result = -ENODEV;
+		goto fail_init_nat_dev;
+	}
+
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) &&
+		ipa3_nat_ipv6ct_init_device(
+			&ipa3_ctx->ipv6ct_mem.dev,
+			IPA_IPV6CT_DEV_NAME,
+			IPA_IPV6CT_PHYS_MEM_SIZE,
+			IPA_IPV6CT_PHYS_MEM_OFFSET,
+			tmp_mem)) {
+		IPAERR("unable to create IPv6CT device\n");
+		result = -ENODEV;
+		goto fail_init_ipv6ct_dev;
+	}
+
+	return 0;
+
+fail_init_ipv6ct_dev:
+	ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
+fail_init_nat_dev:
+	if (tmp_mem != NULL && ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
+		dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+			tmp_mem->vaddr, tmp_mem->dma_handle);
+		kfree(tmp_mem);
+		ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
+	}
+	return result;
+}
+
+/**
+ * ipa3_nat_ipv6ct_destroy_devices() - destroy the NAT and IPv6CT devices
+ *
+ * Called during IPA init to destroy nat device
+ */
+void ipa3_nat_ipv6ct_destroy_devices(void)
+{
+	ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->ipv6ct_mem.dev);
+}
+
+static int ipa3_nat_ipv6ct_allocate_mem(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc,
+	enum ipahal_nat_type nat_type)
+{
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	size_t nat_entry_size;
+
+	struct ipa3_nat_mem *nm_ptr;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+	uintptr_t tmp_ptr;
+
+	int    result = 0;
+
+	IPADBG("In: Requested alloc size %zu for %s\n",
+			  table_alloc->size, dev->name);
+
+	if (!table_alloc->size) {
+		IPAERR_RL("Invalid Parameters\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (!dev->is_dev_init) {
+		IPAERR("%s hasn't been initialized\n", dev->name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if ((dev->is_nat_mem    && nat_type != IPAHAL_NAT_IPV4) ||
+		(dev->is_ipv6ct_mem && nat_type != IPAHAL_NAT_IPV6CT)) {
+		IPAERR("%s dev type(%s) and nat_type(%s) mismatch\n",
+			   dev->name,
+			   (dev->is_nat_mem) ? "V4" : "V6",
+			   ipahal_nat_type_str(nat_type));
+		result = -EPERM;
+		goto bail;
+	}
+
+	ipahal_nat_entry_size(nat_type, &nat_entry_size);
+
+	if (table_alloc->size > MAX_ALLOC_NAT_SIZE(nat_entry_size)) {
+		IPAERR("Trying to allocate size = %zu, max allowed = %zu\n",
+			   table_alloc->size,
+			   MAX_ALLOC_NAT_SIZE(nat_entry_size));
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_type == IPAHAL_NAT_IPV4) {
+
+		nm_ptr = (struct ipa3_nat_mem *) dev;
+
+		if (table_alloc->size <= IPA_NAT_PHYS_MEM_SIZE) {
+			/*
+			 * CAN fit in SRAM, hence we'll use SRAM...
+			 */
+			IPADBG("V4 NAT will reside in: %s\n",
+				   ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_SRAM));
+
+			if (nm_ptr->sram_in_use) {
+				IPAERR("Memory already allocated\n");
+				result = -EPERM;
+				goto bail;
+			}
+
+			mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];
+
+			mld_ptr->table_alloc_size = table_alloc->size;
+
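+			/*
+			 * SRAM-resident tables are reached through the IPA
+			 * SW-area direct-access window, hence phys_addr is
+			 * built from the wrapper base, the register base
+			 * offset, the RAM direct-access register offset and
+			 * the NAT offset within that RAM.
+			 */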
+			mld_ptr->phys_addr =
+				ipa3_ctx->ipa_wrapper_base +
+				ipa3_ctx->ctrl->ipa_reg_base_ofst +
+				ipahal_get_reg_n_ofst(
+					IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+					0) +
+				IPA_NAT_PHYS_MEM_OFFSET;
+
+			mld_ptr->io_vaddr = ioremap(
+				mld_ptr->phys_addr, IPA_NAT_PHYS_MEM_SIZE);
+
+			if (mld_ptr->io_vaddr == NULL) {
+				IPAERR("ioremap failed\n");
+				result = -ENOMEM;
+				goto bail;
+			}
+
+			tmp_ptr = (uintptr_t) mld_ptr->io_vaddr;
+
+			mld_ptr->vaddr = (void *) tmp_ptr;
+
+			nm_ptr->sram_in_use    = true;
+			nm_ptr->last_alloc_loc = IPA_NAT_MEM_IN_SRAM;
+
+		} else {
+
+			/*
+			 * CAN NOT fit in SRAM, hence we'll allocate DDR...
+			 */
+			IPADBG("V4 NAT will reside in: %s\n",
+				   ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
+
+			if (nm_ptr->ddr_in_use) {
+				IPAERR("Memory already allocated\n");
+				result = -EPERM;
+				goto bail;
+			}
+
+			mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];
+
+			mld_ptr->table_alloc_size = table_alloc->size;
+
+			mld_ptr->vaddr =
+				dma_alloc_coherent(
+					ipa3_ctx->pdev,
+					mld_ptr->table_alloc_size,
+					&mld_ptr->dma_handle,
+					gfp_flags);
+
+			if (mld_ptr->vaddr == NULL) {
+				IPAERR("memory alloc failed\n");
+				result = -ENOMEM;
+				goto bail;
+			}
+
+			nm_ptr->ddr_in_use     = true;
+			nm_ptr->last_alloc_loc = IPA_NAT_MEM_IN_DDR;
+		}
+	} else {
+		if (nat_type == IPAHAL_NAT_IPV6CT) {
+
+			dev->table_alloc_size = table_alloc->size;
+
+			IPADBG("V6 NAT will reside in: %s\n",
+				   ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
+
+			dev->vaddr =
+				dma_alloc_coherent(
+					ipa3_ctx->pdev,
+					dev->table_alloc_size,
+					&dev->dma_handle,
+					gfp_flags);
+
+			if (dev->vaddr == NULL) {
+				IPAERR("memory alloc failed\n");
+				result = -ENOMEM;
+				goto bail;
+			}
+		}
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+/**
+ * ipa3_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	int result;
+	struct ipa_ioc_nat_ipv6ct_table_alloc tmp;
+
+	tmp.size = mem->size;
+	tmp.offset = 0;
+
+	result = ipa3_allocate_nat_table(&tmp);
+	if (result)
+		goto bail;
+
+	mem->offset = tmp.offset;
+
+bail:
+	return result;
+}
+
+/**
+ * ipa3_allocate_nat_table() - Allocates memory for the NAT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by NAT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_allocate_nat_table(
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+	struct ipa3_nat_mem          *nm_ptr = &(ipa3_ctx->nat_mem);
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+
+	int result;
+
+	IPADBG("table size:%zu offset:%u\n",
+		   table_alloc->size, table_alloc->offset);
+
+	mutex_lock(&nm_ptr->dev.lock);
+
+	result = ipa3_nat_ipv6ct_allocate_mem(
+		&nm_ptr->dev,
+		table_alloc,
+		IPAHAL_NAT_IPV4);
+
+	if (result)
+		goto bail;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0
+		&&
+		nm_ptr->pdn_mem.base == NULL) {
+
+		gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+		size_t pdn_entry_size;
+		struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;
+
+		ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+
+		pdn_mem_ptr->size = pdn_entry_size * IPA_MAX_PDN_NUM;
+
+		if (IPA_MEM_PART(pdn_config_size) < pdn_mem_ptr->size) {
+			IPAERR(
+				"number of PDN entries exceeds SRAM available space\n");
+			result = -ENOMEM;
+			goto fail_alloc_pdn;
+		}
+
+		pdn_mem_ptr->base =
+			dma_alloc_coherent(
+				ipa3_ctx->pdev,
+				pdn_mem_ptr->size,
+				&pdn_mem_ptr->phys_base,
+				gfp_flags);
+
+		if (pdn_mem_ptr->base == NULL) {
+			IPAERR("fail to allocate PDN memory\n");
+			result = -ENOMEM;
+			goto fail_alloc_pdn;
+		}
+
+		IPADBG("IPA NAT dev allocated PDN memory successfully\n");
+	}
+
+	IPADBG("IPA NAT dev init successfully\n");
+
+	mutex_unlock(&nm_ptr->dev.lock);
+
+	IPADBG("return\n");
+
+	return 0;
+
+fail_alloc_pdn:
+	mld_ptr = &nm_ptr->mem_loc[nm_ptr->last_alloc_loc];
+
+	if (nm_ptr->last_alloc_loc == IPA_NAT_MEM_IN_DDR) {
+		if (mld_ptr->vaddr) {
+			dma_free_coherent(
+				ipa3_ctx->pdev,
+				mld_ptr->table_alloc_size,
+				mld_ptr->vaddr,
+				mld_ptr->dma_handle);
+			mld_ptr->vaddr = NULL;
+		}
+	}
+
+	if (nm_ptr->last_alloc_loc == IPA_NAT_MEM_IN_SRAM) {
+		if (mld_ptr->io_vaddr) {
+			iounmap(mld_ptr->io_vaddr);
+			mld_ptr->io_vaddr = NULL;
+			mld_ptr->vaddr    = NULL;
+		}
+	}
+
+bail:
+	mutex_unlock(&nm_ptr->dev.lock);
+
+	return result;
+}
+
+/**
+ * ipa3_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by IPv6CT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_allocate_ipv6ct_table(
+	struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+	int result;
+
+	IPADBG("\n");
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR_RL("IPv6 connection tracking isn't supported\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+
+	result = ipa3_nat_ipv6ct_allocate_mem(
+		&ipa3_ctx->ipv6ct_mem.dev,
+		table_alloc,
+		IPAHAL_NAT_IPV6CT);
+
+	if (result)
+		goto bail;
+
+	IPADBG("IPA IPv6CT dev init successfully\n");
+
+bail:
+	mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+	return result;
+}
+
+static int ipa3_nat_ipv6ct_check_table_params(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	enum ipa3_nat_mem_in nmi,
+	uint32_t offset,
+	uint16_t entries_num,
+	enum ipahal_nat_type nat_type)
+{
+	size_t entry_size, table_size, orig_alloc_size;
+
+	struct ipa3_nat_mem *nm_ptr;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	IPADBG(
+		"v4(%u) v6(%u) nmi(%s) ofst(%u) ents(%u) nt(%s)\n",
+		dev->is_nat_mem,
+		dev->is_ipv6ct_mem,
+		ipa3_nat_mem_in_as_str(nmi),
+		offset,
+		entries_num,
+		ipahal_nat_type_str(nat_type));
+
+	if (dev->is_ipv6ct_mem) {
+
+		orig_alloc_size = dev->table_alloc_size;
+
+		if (offset > UINT_MAX - dev->dma_handle) {
+			IPAERR_RL("Failed due to integer overflow\n");
+			IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n",
+					  dev->name, &dev->dma_handle, offset);
+			ret = -EPERM;
+			goto bail;
+		}
+
+	} else { /* dev->is_nat_mem */
+
+		nm_ptr = (struct ipa3_nat_mem *) dev;
+
+		mld_ptr         = &nm_ptr->mem_loc[nmi];
+		orig_alloc_size = mld_ptr->table_alloc_size;
+
+		if (nmi == IPA_NAT_MEM_IN_DDR) {
+			if (offset > UINT_MAX - mld_ptr->dma_handle) {
+				IPAERR_RL("Failed due to integer overflow\n");
+				IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n",
+				  dev->name, &mld_ptr->dma_handle, offset);
+				ret = -EPERM;
+				goto bail;
+			}
+		}
+	}
+
+	ret = ipahal_nat_entry_size(nat_type, &entry_size);
+
+	if (ret) {
+		IPAERR("Failed to retrieve size of entry for %s\n",
+			   ipahal_nat_type_str(nat_type));
+		goto bail;
+	}
+
+	table_size = entry_size * entries_num;
+
+	/* check for integer overflow */
+	if (offset > UINT_MAX - table_size) {
+		IPAERR_RL("Detected overflow\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	/* Check offset is not beyond allocated size */
+	if (offset + table_size > orig_alloc_size) {
+		IPAERR_RL("Table offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d table_size:%zu mem_size:%zu\n",
+		  offset, entries_num, table_size, orig_alloc_size);
+		ret = -EPERM;
+		goto bail;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static inline void ipa3_nat_ipv6ct_create_init_cmd(
+	struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd,
+	bool is_shared,
+	dma_addr_t base_addr,
+	uint8_t tbl_index,
+	uint32_t base_table_offset,
+	uint32_t expn_table_offset,
+	uint16_t table_entries,
+	uint16_t expn_table_entries,
+	const char *table_name)
+{
+	table_init_cmd->base_table_addr_shared = is_shared;
+	table_init_cmd->expansion_table_addr_shared = is_shared;
+
+	table_init_cmd->base_table_addr = base_addr + base_table_offset;
+	IPADBG("%s base table offset:0x%x\n", table_name, base_table_offset);
+
+	table_init_cmd->expansion_table_addr = base_addr + expn_table_offset;
+	IPADBG("%s expn table offset:0x%x\n", table_name, expn_table_offset);
+
+	table_init_cmd->table_index = tbl_index;
+	IPADBG("%s table index:0x%x\n", table_name, tbl_index);
+
+	table_init_cmd->size_base_table = table_entries;
+	IPADBG("%s base table size:0x%x\n", table_name, table_entries);
+
+	table_init_cmd->size_expansion_table = expn_table_entries;
+	IPADBG("%s expansion table size:0x%x\n",
+		table_name, expn_table_entries);
+}
+
+static inline bool chk_sram_offset_alignment(
+	uintptr_t addr,
+	u32       mask)
+{
+	if (addr & (uintptr_t) mask) {
+		IPAERR("sram addr(%pK) is not properly aligned\n",
+			(void *) addr);
+		return false;
+	}
+	return true;
+}
+
+static inline int ipa3_nat_ipv6ct_init_device_structure(
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	enum ipa3_nat_mem_in nmi,
+	uint32_t base_table_offset,
+	uint32_t expn_table_offset,
+	uint16_t table_entries,
+	uint16_t expn_table_entries,
+	uint32_t index_offset,
+	uint32_t index_expn_offset,
+	uint8_t  focus_change)
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	IPADBG(
+		"v4(%u) v6(%u) nmi(%s) bto(%u) eto(%u) t_ents(%u) et_ents(%u) io(%u) ieo(%u)\n",
+		dev->is_nat_mem,
+		dev->is_ipv6ct_mem,
+		ipa3_nat_mem_in_as_str(nmi),
+		base_table_offset,
+		expn_table_offset,
+		table_entries,
+		expn_table_entries,
+		index_offset,
+		index_expn_offset);
+
+	if (dev->is_ipv6ct_mem) {
+
+		IPADBG("v6\n");
+
+		dev->base_table_addr =
+			(char *) dev->base_address + base_table_offset;
+
+		IPADBG("%s base_table_addr: 0x%pK\n",
+			   dev->name, dev->base_table_addr);
+
+		dev->expansion_table_addr =
+			(char *) dev->base_address + expn_table_offset;
+
+		IPADBG("%s expansion_table_addr: 0x%pK\n",
+			   dev->name, dev->expansion_table_addr);
+
+		IPADBG("%s table_entries: %d\n",
+			   dev->name, table_entries);
+
+		dev->table_entries = table_entries;
+
+		IPADBG("%s expn_table_entries: %d\n",
+			   dev->name, expn_table_entries);
+
+		dev->expn_table_entries = expn_table_entries;
+
+	} else if (dev->is_nat_mem) {
+
+		struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+		struct ipa3_nat_mem_loc_data *mld_p =
+			&nm_ptr->mem_loc[nmi];
+
+		IPADBG("v4\n");
+
+		nm_ptr->active_table = nmi;
+
+		mld_p->base_table_addr =
+			(char *) mld_p->base_address + base_table_offset;
+
+		IPADBG("%s base_table_addr: 0x%pK\n",
+				  dev->name, mld_p->base_table_addr);
+
+		mld_p->expansion_table_addr =
+			(char *) mld_p->base_address + expn_table_offset;
+
+		IPADBG("%s expansion_table_addr: 0x%pK\n",
+				  dev->name, mld_p->expansion_table_addr);
+
+		IPADBG("%s table_entries: %d\n",
+				  dev->name, table_entries);
+
+		mld_p->table_entries = table_entries;
+
+		IPADBG("%s expn_table_entries: %d\n",
+				  dev->name, expn_table_entries);
+
+		mld_p->expn_table_entries = expn_table_entries;
+
+		mld_p->index_table_addr =
+			(char *) mld_p->base_address + index_offset;
+
+		IPADBG("index_table_addr: 0x%pK\n",
+				  mld_p->index_table_addr);
+
+		mld_p->index_table_expansion_addr =
+			(char *) mld_p->base_address + index_expn_offset;
+
+		IPADBG("index_table_expansion_addr: 0x%pK\n",
+				  mld_p->index_table_expansion_addr);
+
+		if (nmi == IPA_NAT_MEM_IN_DDR) {
+			if (focus_change)
+				nm_ptr->switch2ddr_cnt++;
+		} else {
+			/*
+			 * The IPA wants certain SRAM addresses
+			 * to have particular low order bits to
+			 * be zero.  We test here to ensure...
+			 */
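+			/*
+			 * Below: mask 31 enforces 32-byte alignment for the
+			 * base/expansion tables; mask 3 enforces 4-byte
+			 * alignment for the index tables.
+			 */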
+			if (!chk_sram_offset_alignment(
+				 (uintptr_t) mld_p->base_table_addr,
+				 31) ||
+				!chk_sram_offset_alignment(
+				 (uintptr_t) mld_p->expansion_table_addr,
+				 31) ||
+				!chk_sram_offset_alignment(
+				 (uintptr_t) mld_p->index_table_addr,
+				 3) ||
+				!chk_sram_offset_alignment(
+				 (uintptr_t) mld_p->index_table_expansion_addr,
+				 3)) {
+				ret = -ENODEV;
+				goto done;
+			}
+
+			if (focus_change)
+				nm_ptr->switch2sram_cnt++;
+		}
+	}
+
+done:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static void ipa3_nat_create_init_cmd(
+	struct ipa_ioc_v4_nat_init *init,
+	bool is_shared,
+	dma_addr_t base_addr,
+	struct ipahal_imm_cmd_ip_v4_nat_init *cmd)
+{
+	IPADBG("\n");
+
+	ipa3_nat_ipv6ct_create_init_cmd(
+		&cmd->table_init,
+		is_shared,
+		base_addr,
+		init->tbl_index,
+		init->ipv4_rules_offset,
+		init->expn_rules_offset,
+		init->table_entries,
+		init->expn_table_entries,
+		ipa3_ctx->nat_mem.dev.name);
+
+	cmd->index_table_addr_shared = is_shared;
+	cmd->index_table_expansion_addr_shared = is_shared;
+
+	cmd->index_table_addr =
+		base_addr + init->index_offset;
+	IPADBG("index_offset:0x%x\n", init->index_offset);
+
+	cmd->index_table_expansion_addr =
+		base_addr + init->index_expn_offset;
+	IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		/*
+		 * Starting with IPA v4.0, the public IP field stores the
+		 * PDN config table offset in SMEM instead.
+		 */
+		cmd->public_addr_info = IPA_MEM_PART(pdn_config_ofst);
+		IPADBG("pdn config base:0x%x\n", cmd->public_addr_info);
+	} else {
+		cmd->public_addr_info = init->ip_addr;
+		IPADBG("Public IP address:%pI4h\n", &cmd->public_addr_info);
+	}
+
+	IPADBG("return\n");
+}
+
+static void ipa3_nat_create_modify_pdn_cmd(
+	struct ipahal_imm_cmd_dma_shared_mem *mem_cmd, bool zero_mem)
+{
+	size_t pdn_entry_size, mem_size;
+
+	IPADBG("\n");
+
+	ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+	mem_size = pdn_entry_size * IPA_MAX_PDN_NUM;
+
+	if (zero_mem && ipa3_ctx->nat_mem.pdn_mem.base)
+		memset(ipa3_ctx->nat_mem.pdn_mem.base, 0, mem_size);
+
+	/* Copy the PDN config table to SRAM */
+	mem_cmd->is_read = false;
+	mem_cmd->skip_pipeline_clear = false;
+	mem_cmd->pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd->size = mem_size;
+	mem_cmd->system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
+	mem_cmd->local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(pdn_config_ofst);
+
+	IPADBG("return\n");
+}
+
+static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
+	bool zero_pdn_table)
+{
+	struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+	int i, num_cmd = 0, result;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	IPADBG("\n");
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld[num_cmd] =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR_RL("fail to construct NAT init imm cmd\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+
+		if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) {
+			IPAERR("number of commands is out of range\n");
+			result = -ENOBUFS;
+			goto destroy_imm_cmd;
+		}
+
+		/* Copy the PDN config table to SRAM */
+		ipa3_nat_create_modify_pdn_cmd(&mem_cmd, zero_pdn_table);
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+				"fail construct dma_shared_mem cmd: for pdn table\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+		IPADBG("added PDN table copy cmd\n");
+	}
+
+	result = ipa3_send_cmd(num_cmd, desc);
+	if (result) {
+		IPAERR("fail to send NAT init immediate command\n");
+		goto destroy_imm_cmd;
+	}
+
+	IPADBG("return\n");
+
+destroy_imm_cmd:
+	for (i = 0; i < num_cmd; ++i)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
+	return result;
+}
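+
+/*
+ * For reference, the descriptor chain built above is, in order:
+ *
+ *	[0] REGISTER_WRITE  - force-close the coalescing frame (only when a
+ *			      WAN_COAL_CONS pipe exists)
+ *	[1] NOP (HPS clear) - drain the IPA pipeline
+ *	[2] IP_V4_NAT_INIT  - program the table bases into HW
+ *	[3] DMA_SHARED_MEM  - copy the PDN config table to SRAM (IPA >= v4.0)
+ *
+ * which suggests IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC is 4, though its value
+ * is defined elsewhere.
+ */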
+
+static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
+{
+	struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+	struct ipahal_imm_cmd_pyld
+		*cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+	int i, num_cmd = 0, result;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	IPADBG("\n");
+
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld[num_cmd] =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) {
+		IPAERR("number of commands is out of range\n");
+		result = -ENOBUFS;
+		goto destroy_imm_cmd;
+	}
+
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_CT_INIT, cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR_RL("fail to construct IPv6CT init imm cmd\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	result = ipa3_send_cmd(num_cmd, desc);
+	if (result) {
+		IPAERR("Fail to send IPv6CT init immediate command\n");
+		goto destroy_imm_cmd;
+	}
+
+	IPADBG("return\n");
+
+destroy_imm_cmd:
+	for (i = 0; i < num_cmd; ++i)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
+	return result;
+}
+
+/* IOCTL function handlers */
+/**
+ * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_init_cmd(
+	struct ipa_ioc_v4_nat_init *init)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+	enum ipa3_nat_mem_in nmi;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+
+	int  result;
+
+	IPADBG("In\n");
+
+	if (!sram_compatible) {
+		init->mem_type     = 0;
+		init->focus_change = 0;
+	}
+
+	nmi = init->mem_type;
+
+	IPADBG("tbl_index(%d) table_entries(%u)\n",
+			  init->tbl_index,
+			  init->table_entries);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (!IPA_VALID_TBL_INDEX(init->tbl_index)) {
+		IPAERR_RL("Unsupported table index %d\n",
+				  init->tbl_index);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (init->table_entries == 0) {
+		IPAERR_RL("Table entries is zero\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (!IPA_VALID_NAT_MEM_IN(nmi)) {
+		IPAERR_RL("Bad ipa3_nat_mem_in type\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	mld_ptr = &nm_ptr->mem_loc[nmi];
+
+	if (!mld_ptr->is_mapped) {
+		IPAERR_RL("Attempt to init %s before mmap\n", dev->name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, nmi,
+		init->ipv4_rules_offset,
+		init->table_entries + 1,
+		IPAHAL_NAT_IPV4);
+
+	if (result) {
+		IPAERR_RL("Bad params for NAT base table\n");
+		goto bail;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, nmi,
+		init->expn_rules_offset,
+		init->expn_table_entries,
+		IPAHAL_NAT_IPV4);
+
+	if (result) {
+		IPAERR_RL("Bad params for NAT expansion table\n");
+		goto bail;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, nmi,
+		init->index_offset,
+		init->table_entries + 1,
+		IPAHAL_NAT_IPV4_INDEX);
+
+	if (result) {
+		IPAERR_RL("Bad params for index table\n");
+		goto bail;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, nmi,
+		init->index_expn_offset,
+		init->expn_table_entries,
+		IPAHAL_NAT_IPV4_INDEX);
+
+	if (result) {
+		IPAERR_RL("Bad params for index expansion table\n");
+		goto bail;
+	}
+
+	IPADBG("Table memory becoming active: %s\n",
+		   ipa3_nat_mem_in_as_str(nmi));
+
+	if (nmi == IPA_NAT_MEM_IN_DDR) {
+		ipa3_nat_create_init_cmd(
+			init,
+			false,
+			mld_ptr->dma_handle,
+			&cmd);
+	} else {
+		ipa3_nat_create_init_cmd(
+			init,
+			true,
+			IPA_RAM_NAT_OFST,
+			&cmd);
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+	    nm_ptr->pdn_mem.base && !init->focus_change) {
+
+		struct ipahal_nat_pdn_entry pdn_entry;
+
+		/* store ip in pdn entry cache array */
+		pdn_entry.public_ip = init->ip_addr;
+		pdn_entry.src_metadata = 0;
+		pdn_entry.dst_metadata = 0;
+
+		result = ipahal_nat_construct_entry(
+			IPAHAL_NAT_IPV4_PDN,
+			&pdn_entry,
+			nm_ptr->pdn_mem.base);
+
+		if (result) {
+			IPAERR("Fail to construct NAT pdn entry\n");
+			goto bail;
+		}
+	}
+
+	IPADBG("Posting NAT init command\n");
+
+	result = ipa3_nat_send_init_cmd(&cmd, false);
+
+	if (result) {
+		IPAERR("Fail to send NAT init immediate command\n");
+		goto bail;
+	}
+
+	result = ipa3_nat_ipv6ct_init_device_structure(
+		dev, nmi,
+		init->ipv4_rules_offset,
+		init->expn_rules_offset,
+		init->table_entries,
+		init->expn_table_entries,
+		init->index_offset,
+		init->index_expn_offset,
+		init->focus_change);
+
+	if (result) {
+		IPAERR("Table offset initialization failure\n");
+		goto bail;
+	}
+
+	nm_ptr->public_ip_addr = init->ip_addr;
+
+	IPADBG("Public IP address:%pI4h\n", &nm_ptr->public_ip_addr);
+
+	dev->is_hw_init = true;
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
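+
+/*
+ * In brief, ipa3_nat_init_cmd() above: validates the table geometry
+ * against the mapped memory, builds the init immediate command (the DMA
+ * handle for DDR, IPA_RAM_NAT_OFST for SRAM), optionally seeds PDN
+ * entry 0 with the public IP on IPA >= v4.0, posts the command, and
+ * finally records the table addresses in the device structure.
+ */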
+
+/**
+ * ipa3_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V6_CONN_TRACK_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_ipv6ct_init_cmd(
+	struct ipa_ioc_ipv6ct_init *init)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->ipv6ct_mem.dev;
+
+	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
+
+	int result;
+
+	IPADBG("In\n");
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR_RL("IPv6 connection tracking isn't supported\n");
+		return -EPERM;
+	}
+
+	if (!IPA_VALID_TBL_INDEX(init->tbl_index)) {
+		IPAERR_RL("Unsupported table index %d\n", init->tbl_index);
+		return -EPERM;
+	}
+
+	if (init->table_entries == 0) {
+		IPAERR_RL("Table entries is zero\n");
+		return -EPERM;
+	}
+
+	if (!dev->is_mapped) {
+		IPAERR_RL("attempt to init %s before mmap\n",
+				  dev->name);
+		return -EPERM;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, IPA_NAT_MEM_IN_DDR,
+		init->base_table_offset,
+		init->table_entries + 1,
+		IPAHAL_NAT_IPV6CT);
+
+	if (result) {
+		IPAERR_RL("Bad params for IPv6CT base table\n");
+		return result;
+	}
+
+	result = ipa3_nat_ipv6ct_check_table_params(
+		dev, IPA_NAT_MEM_IN_DDR,
+		init->expn_table_offset,
+		init->expn_table_entries,
+		IPAHAL_NAT_IPV6CT);
+
+	if (result) {
+		IPAERR_RL("Bad params for IPv6CT expansion table\n");
+		return result;
+	}
+
+	IPADBG("Will install v6 NAT in: %s\n",
+		   ipa3_nat_mem_in_as_str(IPA_NAT_MEM_IN_DDR));
+
+	ipa3_nat_ipv6ct_create_init_cmd(
+		&cmd.table_init,
+		false,
+		dev->dma_handle,
+		init->tbl_index,
+		init->base_table_offset,
+		init->expn_table_offset,
+		init->table_entries,
+		init->expn_table_entries,
+		dev->name);
+
+	IPADBG("posting ip_v6_ct_init imm command\n");
+
+	result = ipa3_ipv6ct_send_init_cmd(&cmd);
+
+	if (result) {
+		IPAERR("fail to send IPv6CT init immediate command\n");
+		return result;
+	}
+
+	ipa3_nat_ipv6ct_init_device_structure(
+		dev,
+		IPA_NAT_MEM_IN_DDR,
+		init->base_table_offset,
+		init->expn_table_offset,
+		init->table_entries,
+		init->expn_table_entries,
+		0, 0, 0);
+
+	dev->is_hw_init = true;
+
+	IPADBG("Out\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+ * @mdfy_pdn:	[in] PDN info to be written to SRAM
+ *
+ * Called by NAT client driver to modify an entry in the PDN config table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_mdfy_pdn(
+	struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+	struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;
+
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+	struct ipahal_nat_pdn_entry pdn_fields = { 0 };
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+	size_t entry_size;
+
+	int result = 0;
+
+	IPADBG("In\n");
+
+	mutex_lock(&dev->lock);
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR_RL("IPA HW does not support multi PDN\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (pdn_mem_ptr->base == NULL) {
+		IPAERR_RL(
+			"Attempt to modify a PDN entry before the PDN table memory allocation\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) {
+		IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index);
+		result = -EPERM;
+		goto bail;
+	}
+
+	/*
+	 * Store ip in pdn entry cache array
+	 */
+	pdn_fields.public_ip    = mdfy_pdn->public_ip;
+	pdn_fields.dst_metadata = mdfy_pdn->dst_metadata;
+	pdn_fields.src_metadata = mdfy_pdn->src_metadata;
+
+	/*
+	 * Mark tethering bit for remote modem
+	 */
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) {
+		pdn_fields.src_metadata |= IPA_QMAP_TETH_BIT;
+	}
+
+	/*
+	 * Get size of the entry
+	 */
+	result = ipahal_nat_entry_size(
+		IPAHAL_NAT_IPV4_PDN,
+		&entry_size);
+
+	if (result) {
+		IPAERR("Failed to retrieve pdn entry size\n");
+		goto bail;
+	}
+
+	result = ipahal_nat_construct_entry(
+		IPAHAL_NAT_IPV4_PDN,
+		&pdn_fields,
+		pdn_mem_ptr->base + mdfy_pdn->pdn_index * entry_size);
+
+	if (result) {
+		IPAERR("Fail to construct NAT pdn entry\n");
+		goto bail;
+	}
+
+	IPADBG("Modify PDN in index: %d Public ip address:%pI4h\n",
+		mdfy_pdn->pdn_index,
+		&pdn_fields.public_ip);
+
+	IPADBG("Modify PDN dst metadata: 0x%x src metadata: 0x%x\n",
+		pdn_fields.dst_metadata,
+		pdn_fields.src_metadata);
+
+	/*
+	 * Copy the PDN config table to SRAM
+	 */
+	ipa3_nat_create_modify_pdn_cmd(&mem_cmd, false);
+
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+
+	if (!cmd_pyld) {
+		IPAERR(
+			"fail construct dma_shared_mem cmd: for pdn table\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
+
+	IPADBG("sending PDN table copy cmd\n");
+
+	result = ipa3_send_cmd(1, &desc);
+
+	if (result)
+		IPAERR("Fail to send PDN table copy immediate command\n");
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+bail:
+	mutex_unlock(&dev->lock);
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static uint32_t ipa3_nat_ipv6ct_calculate_table_size(
+	enum ipa3_nat_mem_in nmi,
+	uint8_t base_addr)
+{
+	size_t entry_size;
+	u32 num_entries;
+	enum ipahal_nat_type nat_type;
+	struct ipa3_nat_mem_loc_data *mld_ptr = &ipa3_ctx->nat_mem.mem_loc[nmi];
+
+	switch (base_addr) {
+	case IPA_NAT_BASE_TBL:
+		num_entries = mld_ptr->table_entries + 1;
+		nat_type = IPAHAL_NAT_IPV4;
+		break;
+	case IPA_NAT_EXPN_TBL:
+		num_entries = mld_ptr->expn_table_entries;
+		nat_type = IPAHAL_NAT_IPV4;
+		break;
+	case IPA_NAT_INDX_TBL:
+		num_entries = mld_ptr->table_entries + 1;
+		nat_type = IPAHAL_NAT_IPV4_INDEX;
+		break;
+	case IPA_NAT_INDEX_EXPN_TBL:
+		num_entries = mld_ptr->expn_table_entries;
+		nat_type = IPAHAL_NAT_IPV4_INDEX;
+		break;
+	case IPA_IPV6CT_BASE_TBL:
+		num_entries = ipa3_ctx->ipv6ct_mem.dev.table_entries + 1;
+		nat_type = IPAHAL_NAT_IPV6CT;
+		break;
+	case IPA_IPV6CT_EXPN_TBL:
+		num_entries = ipa3_ctx->ipv6ct_mem.dev.expn_table_entries;
+		nat_type = IPAHAL_NAT_IPV6CT;
+		break;
+	default:
+		IPAERR_RL("Invalid base_addr %d for table DMA command\n",
+			base_addr);
+		return 0;
+	}
+
+	ipahal_nat_entry_size(nat_type, &entry_size);
+
+	return entry_size * num_entries;
+}
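+
+/*
+ * Worked example with illustrative numbers: for IPA_NAT_BASE_TBL with
+ * table_entries == 1000 and a hypothetical IPAHAL_NAT_IPV4 entry size
+ * of 32 bytes, the function returns 32 * (1000 + 1) = 32032 bytes.
+ */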
+
+static int ipa3_table_validate_table_dma_one(
+	enum ipa3_nat_mem_in        nmi,
+	struct ipa_ioc_nat_dma_one *param)
+{
+	uint32_t table_size;
+
+	if (param->table_index >= 1) {
+		IPAERR_RL("Unsupported table index %u\n", param->table_index);
+		return -EPERM;
+	}
+
+	switch (param->base_addr) {
+	case IPA_NAT_BASE_TBL:
+	case IPA_NAT_EXPN_TBL:
+	case IPA_NAT_INDX_TBL:
+	case IPA_NAT_INDEX_EXPN_TBL:
+		if (!ipa3_ctx->nat_mem.dev.is_hw_init) {
+			IPAERR_RL("attempt to write to %s before HW init\n",
+				ipa3_ctx->nat_mem.dev.name);
+			return -EPERM;
+		}
+		IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+		break;
+	case IPA_IPV6CT_BASE_TBL:
+	case IPA_IPV6CT_EXPN_TBL:
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+			IPAERR_RL("IPv6 connection tracking isn't supported\n");
+			return -EPERM;
+		}
+
+		if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
+			IPAERR_RL("attempt to write to %s before HW init\n",
+				ipa3_ctx->ipv6ct_mem.dev.name);
+			return -EPERM;
+		}
+		break;
+	default:
+		IPAERR_RL("Invalid base_addr %d for table DMA command\n",
+			param->base_addr);
+		return -EPERM;
+	}
+
+	table_size = ipa3_nat_ipv6ct_calculate_table_size(
+		nmi,
+		param->base_addr);
+
+	if (!table_size) {
+		IPAERR_RL("Failed to calculate table size for base_addr %d\n",
+				  param->base_addr);
+		return -EPERM;
+	}
+
+	if (param->offset >= table_size) {
+		IPAERR_RL("Invalid offset %d for table DMA command\n",
+			param->offset);
+		IPAERR_RL("table_index %d base addr %d size %d\n",
+			param->table_index, param->base_addr, table_size);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+
+/**
+ * ipa3_table_dma_cmd() - Post TABLE_DMA command to IPA HW
+ * @dma:	[in] table DMA command attributes
+ *
+ * Called by NAT/IPv6CT clients to post TABLE_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_table_dma_cmd(
+	struct ipa_ioc_nat_dma_cmd *dma)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+
+	enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA;
+
+	struct ipahal_imm_cmd_table_dma cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
+	struct ipa3_desc desc[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
+
+	uint8_t cnt, num_cmd = 0;
+
+	int result = 0;
+	int i;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	int max_dma_table_cmds = IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC;
+
+	IPADBG("In\n");
+
+	if (!sram_compatible)
+		dma->mem_type = 0;
+
+	if (!dev->is_dev_init) {
+		IPAERR_RL("NAT hasn't been initialized\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (!IPA_VALID_NAT_MEM_IN(dma->mem_type)) {
+		IPAERR_RL("Invalid ipa3_nat_mem_in type (%u)\n",
+				  dma->mem_type);
+		result = -EPERM;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(dma->mem_type));
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+	memset(desc, 0, sizeof(desc));
+
+	/*
+	 * One descriptor is reserved for closing the coalescing endpoint
+	 * via an immediate command, so the number of DMA entries must be
+	 * less than IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 1 to avoid
+	 * overflowing the ipa3_desc array.
+	 */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
+		max_dma_table_cmds -= 1;
+
+	if (!dma->entries || dma->entries > (max_dma_table_cmds - 1)) {
+		IPAERR_RL("Invalid number of entries %d\n",
+			dma->entries);
+		result = -EPERM;
+		goto bail;
+	}
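+
+	/*
+	 * Descriptor budget, worked through: with a coalescing pipe
+	 * present, one descriptor goes to the coal-close REGISTER_WRITE
+	 * and one to the NOP, leaving room for at most
+	 * IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 2 DMA entries; without
+	 * it, IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 1.
+	 */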
+
+	for (cnt = 0; cnt < dma->entries; ++cnt) {
+
+		result = ipa3_table_validate_table_dma_one(
+			dma->mem_type, &dma->dma[cnt]);
+
+		if (result) {
+			IPAERR_RL("Table DMA command parameter %d is invalid\n",
+					  cnt);
+			goto bail;
+		}
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	/*
+	 * NO-OP IC for ensuring that IPA pipeline is empty
+	 */
+	cmd_pyld[num_cmd] =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("Failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+
+	++num_cmd;
+
+	/*
+	 * NAT_DMA was renamed to TABLE_DMA starting from IPAv4
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		cmd_name = IPA_IMM_CMD_TABLE_DMA;
+
+	for (cnt = 0; cnt < dma->entries; ++cnt) {
+
+		cmd.table_index = dma->dma[cnt].table_index;
+		cmd.base_addr   = dma->dma[cnt].base_addr;
+		cmd.offset      = dma->dma[cnt].offset;
+		cmd.data        = dma->dma[cnt].data;
+
+		cmd_pyld[num_cmd] =
+			ipahal_construct_imm_cmd(cmd_name, &cmd, false);
+
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR_RL("Fail to construct table_dma imm cmd\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+
+		++num_cmd;
+	}
+
+	result = ipa3_send_cmd(num_cmd, desc);
+
+	if (result)
+		IPAERR("Fail to send table_dma immediate command\n");
+
+destroy_imm_cmd:
+	for (cnt = 0; cnt < num_cmd; ++cnt)
+		ipahal_destroy_imm_cmd(cmd_pyld[cnt]);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+/**
+ * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] NAT DMA command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	return ipa3_table_dma_cmd(dma);
+}
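+
+/*
+ * Hypothetical caller sketch (illustrative only, assuming the uapi
+ * struct ends in a flexible dma[] array, as its use above suggests):
+ * patch one word at offset 0 of the DDR-resident NAT base table:
+ *
+ *	struct ipa_ioc_nat_dma_cmd *dma;
+ *
+ *	dma = kzalloc(sizeof(*dma) + sizeof(struct ipa_ioc_nat_dma_one),
+ *		      GFP_KERNEL);
+ *	if (dma) {
+ *		dma->mem_type = IPA_NAT_MEM_IN_DDR;
+ *		dma->entries = 1;
+ *		dma->dma[0].table_index = 0;
+ *		dma->dma[0].base_addr = IPA_NAT_BASE_TBL;
+ *		dma->dma[0].offset = 0;
+ *		dma->dma[0].data = 0x1234;
+ *		ipa3_table_dma_cmd(dma);
+ *		kfree(dma);
+ *	}
+ */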
+
+static void ipa3_nat_ipv6ct_free_mem(
+	struct ipa3_nat_ipv6ct_common_mem *dev)
+{
+	struct ipa3_nat_mem *nm_ptr;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+
+	if (dev->is_ipv6ct_mem) {
+
+		IPADBG("In: v6\n");
+
+		if (dev->vaddr) {
+			IPADBG("Freeing dma memory for %s\n", dev->name);
+
+			dma_free_coherent(
+				ipa3_ctx->pdev,
+				dev->table_alloc_size,
+				dev->vaddr,
+				dev->dma_handle);
+		}
+
+		dev->vaddr                = NULL;
+		dev->dma_handle           = 0;
+		dev->table_alloc_size     = 0;
+		dev->base_table_addr      = NULL;
+		dev->expansion_table_addr = NULL;
+		dev->table_entries        = 0;
+		dev->expn_table_entries   = 0;
+
+		dev->is_hw_init           = false;
+		dev->is_mapped            = false;
+	} else {
+		if (dev->is_nat_mem) {
+
+			IPADBG("In: v4\n");
+
+			nm_ptr = (struct ipa3_nat_mem *) dev;
+
+			if (nm_ptr->ddr_in_use) {
+
+				nm_ptr->ddr_in_use = false;
+
+				mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_DDR];
+
+				if (mld_ptr->vaddr) {
+					IPADBG("Freeing dma memory for %s\n",
+						   dev->name);
+
+					dma_free_coherent(
+						ipa3_ctx->pdev,
+						mld_ptr->table_alloc_size,
+						mld_ptr->vaddr,
+						mld_ptr->dma_handle);
+				}
+
+				mld_ptr->vaddr                      = NULL;
+				mld_ptr->dma_handle                 = 0;
+				mld_ptr->table_alloc_size           = 0;
+				mld_ptr->table_entries              = 0;
+				mld_ptr->expn_table_entries         = 0;
+				mld_ptr->base_table_addr            = NULL;
+				mld_ptr->expansion_table_addr       = NULL;
+				mld_ptr->index_table_addr           = NULL;
+				mld_ptr->index_table_expansion_addr = NULL;
+			}
+
+			if (nm_ptr->sram_in_use) {
+
+				nm_ptr->sram_in_use = false;
+
+				mld_ptr = &nm_ptr->mem_loc[IPA_NAT_MEM_IN_SRAM];
+
+				if (mld_ptr->io_vaddr) {
+					IPADBG("Unmapping sram memory for %s\n",
+						   dev->name);
+					iounmap(mld_ptr->io_vaddr);
+				}
+
+				mld_ptr->io_vaddr                   = NULL;
+				mld_ptr->vaddr                      = NULL;
+				mld_ptr->dma_handle                 = 0;
+				mld_ptr->table_alloc_size           = 0;
+				mld_ptr->table_entries              = 0;
+				mld_ptr->expn_table_entries         = 0;
+				mld_ptr->base_table_addr            = NULL;
+				mld_ptr->expansion_table_addr       = NULL;
+				mld_ptr->index_table_addr           = NULL;
+				mld_ptr->index_table_expansion_addr = NULL;
+			}
+
+			memset(nm_ptr->mem_loc, 0, sizeof(nm_ptr->mem_loc));
+		}
+	}
+
+	IPADBG("Out\n");
+}
+
+static int ipa3_nat_ipv6ct_create_del_table_cmd(
+	uint8_t tbl_index,
+	u32 base_addr,
+	struct ipa3_nat_ipv6ct_common_mem *dev,
+	struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd)
+{
+	bool mem_type_shared = true;
+
+	IPADBG("In: tbl_index(%u) base_addr(%u) v4(%u) v6(%u)\n",
+			  tbl_index,
+			  base_addr,
+			  dev->is_nat_mem,
+			  dev->is_ipv6ct_mem);
+
+	if (!IPA_VALID_TBL_INDEX(tbl_index)) {
+		IPAERR_RL("Unsupported table index %d\n", tbl_index);
+		return -EPERM;
+	}
+
+	if (dev->tmp_mem) {
+		IPADBG("using temp memory during %s del\n", dev->name);
+		mem_type_shared = false;
+		base_addr = dev->tmp_mem->dma_handle;
+	}
+
+	table_init_cmd->table_index = tbl_index;
+	table_init_cmd->base_table_addr = base_addr;
+	table_init_cmd->base_table_addr_shared = mem_type_shared;
+	table_init_cmd->expansion_table_addr = base_addr;
+	table_init_cmd->expansion_table_addr_shared = mem_type_shared;
+	table_init_cmd->size_base_table = 0;
+	table_init_cmd->size_expansion_table = 0;
+
+	IPADBG("Out\n");
+
+	return 0;
+}
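+
+/*
+ * Note on the deletion scheme above: a table is deleted by re-posting
+ * the init command with both table sizes set to zero and both table
+ * pointers aimed at either the passed-in SRAM offset or the driver's
+ * temporary DMA buffer, which detaches the real tables from HW before
+ * their memory is freed.
+ */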
+
+static int ipa3_nat_send_del_table_cmd(
+	uint8_t tbl_index)
+{
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	int result = 0;
+
+	IPADBG("In\n");
+
+	result =
+		ipa3_nat_ipv6ct_create_del_table_cmd(
+			tbl_index,
+			IPA_NAT_PHYS_MEM_OFFSET,
+			&ipa3_ctx->nat_mem.dev,
+			&cmd.table_init);
+
+	if (result) {
+		IPAERR(
+			"Fail to create immediate command to delete NAT table\n");
+		goto bail;
+	}
+
+	cmd.index_table_addr =
+		cmd.table_init.base_table_addr;
+	cmd.index_table_addr_shared =
+		cmd.table_init.base_table_addr_shared;
+	cmd.index_table_expansion_addr =
+		cmd.index_table_addr;
+	cmd.index_table_expansion_addr_shared =
+		cmd.index_table_addr_shared;
+	cmd.public_addr_info = 0;
+
+	IPADBG("Posting NAT delete command\n");
+
+	result = ipa3_nat_send_init_cmd(&cmd, true);
+
+	if (result) {
+		IPAERR("Fail to send NAT delete immediate command\n");
+		goto bail;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static int ipa3_ipv6ct_send_del_table_cmd(uint8_t tbl_index)
+{
+	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
+	int result;
+
+	IPADBG("\n");
+
+	result = ipa3_nat_ipv6ct_create_del_table_cmd(
+		tbl_index,
+		IPA_IPV6CT_PHYS_MEM_OFFSET,
+		&ipa3_ctx->ipv6ct_mem.dev,
+		&cmd.table_init);
+	if (result) {
+		IPAERR(
+			"Fail to create immediate command to delete IPv6CT table\n");
+		return result;
+	}
+
+	IPADBG("posting IPv6CT delete command\n");
+	result = ipa3_ipv6ct_send_init_cmd(&cmd);
+	if (result) {
+		IPAERR("Fail to send IPv6CT delete immediate command\n");
+		return result;
+	}
+
+	IPADBG("return\n");
+	return 0;
+}
+
+/**
+ * ipa3_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipa_ioc_nat_ipv6ct_table_del tmp;
+
+	tmp.table_index = del->table_index;
+
+	return ipa3_del_nat_table(&tmp);
+}
+
+/**
+ * ipa3_del_nat_table() - Delete the NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client to delete the table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_del_nat_table(
+	struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+	struct ipa3_nat_mem *nm_ptr = (struct ipa3_nat_mem *) dev;
+	struct ipa3_nat_mem_loc_data *mld_ptr;
+	enum ipa3_nat_mem_in nmi;
+
+	int result = 0;
+
+	IPADBG("In\n");
+
+	if (!sram_compatible)
+		del->mem_type = 0;
+
+	nmi = del->mem_type;
+
+	if (!dev->is_dev_init) {
+		IPAERR("NAT hasn't been initialized\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (!IPA_VALID_TBL_INDEX(del->table_index)) {
+		IPAERR_RL("Unsupported table index %d\n",
+				  del->table_index);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (!IPA_VALID_NAT_MEM_IN(nmi)) {
+		IPAERR_RL("Bad ipa3_nat_mem_in type\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	mld_ptr = &nm_ptr->mem_loc[nmi];
+
+	mutex_lock(&dev->lock);
+
+	if (dev->is_hw_init) {
+
+		result = ipa3_nat_send_del_table_cmd(del->table_index);
+
+		if (result) {
+			IPAERR(
+				"Fail to send immediate command to delete NAT table\n");
+			goto unlock;
+		}
+	}
+
+	nm_ptr->public_ip_addr              = 0;
+
+	mld_ptr->index_table_addr           = NULL;
+	mld_ptr->index_table_expansion_addr = NULL;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && nm_ptr->pdn_mem.base) {
+
+		struct ipa_mem_buffer *pdn_mem_ptr = &nm_ptr->pdn_mem;
+
+		IPADBG("Freeing the PDN memory\n");
+
+		dma_free_coherent(
+			ipa3_ctx->pdev,
+			pdn_mem_ptr->size,
+			pdn_mem_ptr->base,
+			pdn_mem_ptr->phys_base);
+
+		pdn_mem_ptr->base = NULL;
+	}
+
+	ipa3_nat_ipv6ct_free_mem(dev);
+
+unlock:
+	mutex_unlock(&dev->lock);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+/**
+ * ipa3_del_ipv6ct_table() - Delete the IPv6CT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by IPv6CT client to delete the table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_del_ipv6ct_table(
+	struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->ipv6ct_mem.dev;
+
+	int result = 0;
+
+	IPADBG("In\n");
+
+	if (!sram_compatible)
+		del->mem_type = 0;
+
+	if (!dev->is_dev_init) {
+		IPAERR("IPv6 connection tracking hasn't been initialized\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR_RL("IPv6 connection tracking isn't supported\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	mutex_lock(&dev->lock);
+
+	if (dev->is_hw_init) {
+		result = ipa3_ipv6ct_send_del_table_cmd(del->table_index);
+
+		if (result) {
+			IPAERR("ipa3_ipv6ct_send_del_table_cmd() fail\n");
+			goto unlock;
+		}
+	}
+
+	ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->ipv6ct_mem.dev);
+
+unlock:
+	mutex_unlock(&dev->lock);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+int ipa3_nat_get_sram_info(
+	struct ipa_nat_in_sram_info *info_ptr)
+{
+	struct ipa3_nat_ipv6ct_common_mem *dev = &ipa3_ctx->nat_mem.dev;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if (!info_ptr) {
+		IPAERR("Bad argument passed\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (!dev->is_dev_init) {
+		IPAERR_RL("NAT hasn't been initialized\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	sram_compatible = true;
+
+	memset(info_ptr,
+		   0,
+		   sizeof(struct ipa_nat_in_sram_info));
+
+	/*
+	 * Size of SRAM set aside for the NAT table.
+	 */
+	info_ptr->sram_mem_available_for_nat = IPA_RAM_NAT_SIZE;
+
+	/*
+	 * If table's phys addr in SRAM is not page aligned, it will be
+	 * offset into the mmap'd VM by the amount calculated below.  This
+	 * value can be used by the app, so that it can know where the
+	 * table actually lives in the mmap'd VM...
+	 */
+	info_ptr->nat_table_offset_into_mmap =
+		(ipa3_ctx->ipa_wrapper_base +
+		 ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		 ipahal_get_reg_n_ofst(
+			 IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+			 0) +
+		 IPA_RAM_NAT_OFST) & ~PAGE_MASK;
+
+	/*
+	 * If the offset above plus the size of the NAT table causes the
+	 * table to extend beyond the next page boundary, the app needs to
+	 * know it, so that it can increase the size used in the mmap
+	 * request...
+	 */
+	info_ptr->best_nat_in_sram_size_rqst =
+		roundup(
+			info_ptr->nat_table_offset_into_mmap +
+			IPA_RAM_NAT_SIZE,
+			PAGE_SIZE);
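+
+	/*
+	 * Worked example with illustrative numbers: if the table's
+	 * physical address ends in 0x100 and PAGE_SIZE is 4096, then
+	 * nat_table_offset_into_mmap is 0x100, and with a hypothetical
+	 * IPA_RAM_NAT_SIZE of 0x2000 the best mmap request would be
+	 * roundup(0x100 + 0x2000, 4096) = 0x3000 bytes.
+	 */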
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}

+ 772 - 0
ipa/ipa_v3/ipa_odl.c

@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include "ipa_odl.h"
+#include <linux/msm_ipa.h>
+#include <linux/sched/signal.h>
+#include <linux/poll.h>
+
+struct ipa_odl_context *ipa3_odl_ctx;
+
+static DECLARE_WAIT_QUEUE_HEAD(odl_ctl_msg_wq);
+
+static void print_ipa_odl_state_bit_mask(void)
+{
+	IPADBG("ipa3_odl_ctx->odl_state.odl_init --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_init);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_open --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_open);
+	IPADBG("ipa3_odl_ctx->odl_state.adpl_open --> %d\n",
+		ipa3_odl_ctx->odl_state.adpl_open);
+	IPADBG("ipa3_odl_ctx->odl_state.aggr_byte_limit_sent --> %d\n",
+		ipa3_odl_ctx->odl_state.aggr_byte_limit_sent);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_ep_setup --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_ep_setup);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_setup_done_sent --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_setup_done_sent);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_ep_info_sent --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_ep_info_sent);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_connected --> %d\n",
+		ipa3_odl_ctx->odl_state.odl_connected);
+	IPADBG("ipa3_odl_ctx->odl_state.odl_disconnected --> %d\n\n",
+		ipa3_odl_ctx->odl_state.odl_disconnected);
+}
+
+static int ipa_odl_ctl_fops_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+
+	if (ipa3_odl_ctx->odl_state.odl_init) {
+		ipa3_odl_ctx->odl_state.odl_open = true;
+	} else {
+		IPAERR("Trying to open odl ctl pipe before odl init\n");
+		print_ipa_odl_state_bit_mask();
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int ipa_odl_ctl_fops_release(struct inode *inode, struct file *filp)
+{
+	IPADBG("QTI closed ipa_odl_ctl node\n");
+	ipa3_odl_ctx->odl_state.odl_open = false;
+	return 0;
+}
+
+/**
+ * ipa_odl_ctl_fops_read() - read message from IPA ODL device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continuously read from /dev/ipa_odl_ctl;
+ * the read blocks when there are no messages to read.
+ * Upon return, user-space should read the u8 status byte from the
+ * start of the buffer.
+ *
+ * 0 --> ODL disconnected.
+ * 1 --> ODL connected.
+ *
+ * Buffer supplied must be big enough to
+ * hold the one-byte status message.
+ *
+ * Returns: how many bytes copied to buffer
+ *
+ * Note: Should not be called from atomic context
+ */
+
+static ssize_t ipa_odl_ctl_fops_read(struct file *filp, char __user *buf,
+			size_t count, loff_t *f_pos)
+{
+	char __user *start;
+	u8 data;
+	int ret = 0;
+	static bool old_state;
+	bool new_state = false;
+
+	start = buf;
+	ipa3_odl_ctx->odl_ctl_msg_wq_flag = false;
+
+	if (!ipa3_odl_ctx->odl_state.adpl_open &&
+			!ipa3_odl_ctx->odl_state.odl_disconnected) {
+		IPADBG("Failed to send data, odl pipe already disconnected\n");
+		ret = -EFAULT;
+		goto send_failed;
+	}
+
+	if (ipa3_odl_ctx->odl_state.odl_ep_setup)
+		new_state = true;
+	else if (ipa3_odl_ctx->odl_state.odl_disconnected)
+		new_state = false;
+	else {
+		IPADBG("Failed to send data, odl already running\n");
+		ret = -EFAULT;
+		goto send_failed;
+	}
+
+	if (old_state != new_state) {
+		old_state = new_state;
+
+		data = new_state ? 1 : 0;
+
+		if (copy_to_user(buf, &data,
+					sizeof(data))) {
+			IPADBG("Copying data to user failed\n");
+			ret = -EFAULT;
+			goto send_failed;
+		}
+
+		buf += sizeof(data);
+
+		if (data == 1)
+			ipa3_odl_ctx->odl_state.odl_setup_done_sent =
+				true;
+	}
+
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+send_failed:
+	return ret;
+}
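+
+/*
+ * Hypothetical user-space usage sketch (not part of this driver):
+ * poll and read the one-byte connection state from /dev/ipa_odl_ctl:
+ *
+ *	int fd = open("/dev/ipa_odl_ctl", O_RDONLY);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	uint8_t state;
+ *
+ *	while (poll(&pfd, 1, -1) > 0) {
+ *		if (read(fd, &state, sizeof(state)) == sizeof(state))
+ *			printf("ODL %sconnected\n", state ? "" : "dis");
+ *	}
+ */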
+
+static unsigned int ipa_odl_ctl_fops_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	poll_wait(file, &odl_ctl_msg_wq, wait);
+
+	if (ipa3_odl_ctx->odl_ctl_msg_wq_flag) {
+		IPADBG("Sending read mask to odl control pipe\n");
+		mask |= POLLIN | POLLRDNORM;
+	}
+	return mask;
+}
+
+static long ipa_odl_ctl_fops_ioctl(struct file *filp, unsigned int cmd,
+							unsigned long arg)
+{
+	struct ipa_odl_ep_info ep_info = {0};
+	struct ipa_odl_modem_config status;
+	int retval = 0;
+
+	IPADBG("Calling odl ioctl cmd = %d\n", cmd);
+	if (!ipa3_odl_ctx->odl_state.odl_setup_done_sent) {
+		IPAERR("ioctl called before odl setup was completed\n");
+		print_ipa_odl_state_bit_mask();
+		retval = -ENODEV;
+		goto fail;
+	}
+
+	switch (cmd) {
+	case IPA_IOC_ODL_QUERY_ADAPL_EP_INFO:
+		/* Send ep_info to user APP */
+		ep_info.ep_type = ODL_EP_TYPE_HSUSB;
+		ep_info.peripheral_iface_id = ODL_EP_PERIPHERAL_IFACE_ID;
+		ep_info.cons_pipe_num = -1;
+		ep_info.prod_pipe_num =
+			ipa3_odl_ctx->odl_client_hdl;
+		if (copy_to_user((void __user *)arg, &ep_info,
+					sizeof(ep_info))) {
+			retval = -EFAULT;
+			goto fail;
+		}
+		ipa3_odl_ctx->odl_state.odl_ep_info_sent = true;
+		break;
+	case IPA_IOC_ODL_QUERY_MODEM_CONFIG:
+		IPADBG("Received IPA_IOC_ODL_QUERY_MODEM_CONFIG\n");
+		if (copy_from_user(&status, (const void __user *)arg,
+			sizeof(status))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (status.config_status == CONFIG_SUCCESS)
+			ipa3_odl_ctx->odl_state.odl_connected = true;
+		IPADBG("status.config_status = %d odl_connected = %d\n",
+		status.config_status, ipa3_odl_ctx->odl_state.odl_connected);
+		break;
+	default:
+		retval = -ENOIOCTLCMD;
+		break;
+	}
+
+fail:
+	return retval;
+}
+
+static void delete_first_node(void)
+{
+	struct ipa3_push_msg_odl *msg;
+
+	if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) {
+		msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list,
+				struct ipa3_push_msg_odl, link);
+		if (msg) {
+			list_del(&msg->link);
+			kfree(msg->buff);
+			kfree(msg);
+			ipa3_odl_ctx->stats.odl_drop_pkt++;
+			if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue))
+				atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue);
+		}
+	} else {
+		IPADBG("List Empty\n");
+	}
+}
+
+int ipa3_send_adpl_msg(unsigned long skb_data)
+{
+	struct ipa3_push_msg_odl *msg;
+	struct sk_buff *skb = (struct sk_buff *)skb_data;
+	void *data;
+
+	IPADBG_LOW("Processing DPL data\n");
+	msg = kzalloc(sizeof(struct ipa3_push_msg_odl), GFP_KERNEL);
+	if (msg == NULL) {
+		IPADBG("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	data = kmemdup(skb->data, skb->len, GFP_KERNEL);
+	if (data == NULL) {
+		kfree(msg);
+		return -ENOMEM;
+	}
+	msg->buff = data;
+	msg->len = skb->len;
+	mutex_lock(&ipa3_odl_ctx->adpl_msg_lock);
+	if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue) >=
+						MAX_QUEUE_TO_ODL)
+		delete_first_node();
+	list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list);
+	atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue);
+	mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
+	IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt);
+
+	return 0;
+}
+
+/**
+ * odl_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data
+ */
+static void odl_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	IPADBG_LOW("Rx packet was received\n");
+	if (evt == IPA_RECEIVE)
+		ipa3_send_adpl_msg(data);
+	else
+		IPAERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+}
+
+int ipa_setup_odl_pipe(void)
+{
+	struct ipa_sys_connect_params *ipa_odl_ep_cfg;
+	int ret;
+
+	ipa_odl_ep_cfg = &ipa3_odl_ctx->odl_sys_param;
+
+	IPADBG("Setting up the odl endpoint\n");
+	ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+
+	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
+	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+						IPA_ODL_AGGR_BYTE_LIMIT;
+	ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit = 0;
+
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+	ipa_odl_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+	ipa_odl_ep_cfg->client = IPA_CLIENT_ODL_DPL_CONS;
+	ipa_odl_ep_cfg->notify = odl_ipa_packet_receive_notify;
+
+	ipa_odl_ep_cfg->napi_obj = NULL;
+	ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE *
+						IPA_FIFO_ELEMENT_SIZE;
+	ipa3_odl_ctx->odl_client_hdl = -1;
+
+	/* For MHIP, ODL functionality is DMA. So bypass aggregation, checksum
+	 * offload, hdr_len.
+	 */
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+		ipa3_is_mhip_offload_enabled()) {
+		IPADBG("MHIP enabled: bypass aggr + csum offload for ODL\n");
+		ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+			IPA_DISABLE_CS_OFFLOAD;
+		ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 0;
+	}
+
+	ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg,
+			&ipa3_odl_ctx->odl_client_hdl);
+	return ret;
+}
+
+/**
+ * ipa3_odl_register_pm - Register odl client for PM
+ *
+ * This function will register 1 client with IPA PM to represent odl
+ * in clock scaling calculation:
+ *	- "ODL" - this client will be activated when pipe connected
+ */
+static int ipa3_odl_register_pm(void)
+{
+	int result = 0;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = "ODL";
+	pm_reg.group = IPA_PM_GROUP_DEFAULT;
+	pm_reg.skip_clk_vote = true;
+	result = ipa_pm_register(&pm_reg, &ipa3_odl_ctx->odl_pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return result;
+}
+
+int ipa3_odl_pipe_open(void)
+{
+	int ret = 0;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (!ipa3_odl_ctx->odl_state.adpl_open) {
+		IPAERR("adpl pipe not configured\n");
+		return 0;
+	}
+
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.tmr_val = 0;
+	holb_cfg.en = 1;
+
+	ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg);
+	ret = ipa_setup_odl_pipe();
+	if (ret) {
+		IPAERR("Setup endpoint config failed\n");
+		ipa3_odl_ctx->odl_state.adpl_open = false;
+		goto fail;
+	}
+	ipa3_cfg_ep_holb_by_client(IPA_CLIENT_ODL_DPL_CONS, &holb_cfg);
+	ipa3_odl_ctx->odl_state.odl_ep_setup = true;
+	IPADBG("Setup endpoint config success\n");
+
+	ipa3_odl_ctx->stats.odl_drop_pkt = 0;
+	atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0);
+	ipa3_odl_ctx->stats.odl_rx_pkt = 0;
+	ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0;
+	/*
+	 * Send signal to ipa_odl_ctl_fops_read,
+	 * to send ODL ep open notification
+	 */
+	if (ipa3_is_mhip_offload_enabled()) {
+		IPADBG("MHIP is enabled, continue\n");
+		ipa3_odl_ctx->odl_state.odl_open = true;
+		ipa3_odl_ctx->odl_state.odl_setup_done_sent = true;
+		ipa3_odl_ctx->odl_state.odl_ep_info_sent = true;
+		ipa3_odl_ctx->odl_state.odl_connected = true;
+		ipa3_odl_ctx->odl_state.odl_disconnected = false;
+
+		/* Enable ADPL over ODL for MPM */
+		ret = ipa3_mpm_enable_adpl_over_odl(true);
+		if (ret) {
+			IPAERR("mpm failed to enable ADPL over ODL %d\n", ret);
+			return ret;
+		}
+	} else {
+		ipa3_odl_ctx->odl_ctl_msg_wq_flag = true;
+		IPADBG("Wake up odl ctl\n");
+		wake_up_interruptible(&odl_ctl_msg_wq);
+		if (ipa3_odl_ctx->odl_state.odl_disconnected)
+			ipa3_odl_ctx->odl_state.odl_disconnected = false;
+	}
+fail:
+	return ret;
+}
+
+static int ipa_adpl_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+
+	IPADBG("Called the function :\n");
+	if (ipa3_odl_ctx->odl_state.odl_init &&
+				!ipa3_odl_ctx->odl_state.adpl_open) {
+		/* Activate ipa_pm*/
+		ret = ipa_pm_activate_sync(ipa3_odl_ctx->odl_pm_hdl);
+		if (ret)
+			IPAERR("failed to activate pm\n");
+		ipa3_odl_ctx->odl_state.adpl_open = true;
+		ret = ipa3_odl_pipe_open();
+	} else {
+		IPAERR("Trying to open adpl pipe before odl init\n");
+		print_ipa_odl_state_bit_mask();
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int ipa_adpl_release(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	/* Deactivate ipa_pm */
+	ret = ipa_pm_deactivate_sync(ipa3_odl_ctx->odl_pm_hdl);
+	if (ret)
+		IPAERR("failed to deactivate pm\n");
+	ipa3_odl_pipe_cleanup(false);
+
+	/* Disable ADPL over ODL for MPM */
+	if (ipa3_is_mhip_offload_enabled()) {
+		ret = ipa3_mpm_enable_adpl_over_odl(false);
+		if (ret)
+			IPAERR("mpm failed to disable ADPL over ODL\n");
+
+	}
+
+	return ret;
+}
+
+void ipa3_odl_pipe_cleanup(bool is_ssr)
+{
+	bool ipa_odl_opened = false;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (!ipa3_odl_ctx->odl_state.adpl_open) {
+		IPAERR("adpl pipe not configured\n");
+		return;
+	}
+	if (ipa3_odl_ctx->odl_state.odl_open)
+		ipa_odl_opened = true;
+
+	memset(&ipa3_odl_ctx->odl_state, 0, sizeof(ipa3_odl_ctx->odl_state));
+
+	/* Since init will not be done again */
+	ipa3_odl_ctx->odl_state.odl_init = true;
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.tmr_val = 0;
+	holb_cfg.en = 0;
+
+	ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg);
+
+	ipa3_teardown_sys_pipe(ipa3_odl_ctx->odl_client_hdl);
+	ipa3_odl_ctx->odl_client_hdl = -1;
+	/* Assume QTI will never close this node once opened */
+	if (ipa_odl_opened)
+		ipa3_odl_ctx->odl_state.odl_open = true;
+
+	/* Assume DIAG will not close this node in SSR case */
+	if (is_ssr)
+		ipa3_odl_ctx->odl_state.adpl_open = true;
+	else
+		ipa3_odl_ctx->odl_state.adpl_open = false;
+
+	ipa3_odl_ctx->odl_state.odl_disconnected = true;
+	ipa3_odl_ctx->odl_state.odl_ep_setup = false;
+	ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = false;
+	ipa3_odl_ctx->odl_state.odl_connected = false;
+	/*
+	 * Send signal to ipa_odl_ctl_fops_read,
+	 * to send ODL ep close notification
+	 */
+	ipa3_odl_ctx->odl_ctl_msg_wq_flag = true;
+	ipa3_odl_ctx->stats.odl_drop_pkt = 0;
+	atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0);
+	ipa3_odl_ctx->stats.odl_rx_pkt = 0;
+	ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0;
+	IPADBG("Wake up odl ctl\n");
+	wake_up_interruptible(&odl_ctl_msg_wq);
+}
+
+/**
+ * ipa_adpl_read() - read message from IPA device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continually read from /dev/ipa_adpl,
+ * read will block when there are no messages to read.
+ * Upon return, user-space should read the data from the start of
+ * the buffer. The buffer supplied must be big enough to
+ * hold the data.
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count,
+		  loff_t *f_pos)
+{
+	int ret =  0;
+	char __user *start = buf;
+	struct ipa3_push_msg_odl *msg;
+
+	while (1) {
+		IPADBG_LOW("Writing message to adpl pipe\n");
+		if (!ipa3_odl_ctx->odl_state.odl_open)
+			break;
+
+		mutex_lock(&ipa3_odl_ctx->adpl_msg_lock);
+		msg = NULL;
+		if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) {
+			msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list,
+					struct ipa3_push_msg_odl, link);
+			list_del(&msg->link);
+			if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue))
+				atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue);
+		}
+
+		mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
+
+		if (msg != NULL) {
+			if (msg->len > count) {
+				IPAERR("Message length greater than count\n");
+				kfree(msg->buff);
+				kfree(msg);
+				msg = NULL;
+				ret = -EAGAIN;
+				break;
+			}
+
+			if (msg->buff) {
+				if (copy_to_user(buf, msg->buff,
+							msg->len)) {
+					ret = -EFAULT;
+					kfree(msg->buff);
+					kfree(msg);
+					msg = NULL;
+					break;
+				}
+				buf += msg->len;
+				count -= msg->len;
+				kfree(msg->buff);
+			}
+			IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_tx_diag_pkt);
+			kfree(msg);
+			msg = NULL;
+		} else {
+			ret = -EAGAIN;
+			break;
+		}
+
+		ret = -EAGAIN;
+		if (filp->f_flags & O_NONBLOCK)
+			break;
+
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
+
+		if (start != buf)
+			break;
+
+	}
+
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+
+	return ret;
+}
+
+static long ipa_adpl_ioctl(struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	struct odl_agg_pipe_info odl_pipe_info;
+	int retval = 0;
+
+	if (!ipa3_odl_ctx->odl_state.odl_connected) {
+		IPAERR("ioctl not allowed while ODL config is in progress\n");
+		print_ipa_odl_state_bit_mask();
+		retval = -ENODEV;
+		goto fail;
+	}
+	IPADBG("Calling adpl ioctl\n");
+
+	switch (cmd) {
+	case IPA_IOC_ODL_GET_AGG_BYTE_LIMIT:
+		odl_pipe_info.agg_byte_limit =
+		ipa3_odl_ctx->odl_sys_param.ipa_ep_cfg.aggr.aggr_byte_limit;
+		if (copy_to_user((void __user *)arg, &odl_pipe_info,
+					sizeof(odl_pipe_info))) {
+			retval = -EFAULT;
+			goto fail;
+		}
+		ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = true;
+		break;
+	default:
+		retval = -ENOIOCTLCMD;
+		print_ipa_odl_state_bit_mask();
+		break;
+	}
+
+fail:
+	return retval;
+}
+
+static const struct file_operations ipa_odl_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_odl_ctl_fops_open,
+	.release = ipa_odl_ctl_fops_release,
+	.read = ipa_odl_ctl_fops_read,
+	.unlocked_ioctl = ipa_odl_ctl_fops_ioctl,
+	.poll = ipa_odl_ctl_fops_poll,
+};
+
+static const struct file_operations ipa_adpl_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_adpl_open,
+	.release = ipa_adpl_release,
+	.read = ipa_adpl_read,
+	.unlocked_ioctl = ipa_adpl_ioctl,
+};
+
+int ipa_odl_init(void)
+{
+	int result = 0;
+	struct cdev *cdev;
+	int loop = 0;
+	struct ipa3_odl_char_device_context *odl_cdev;
+
+	ipa3_odl_ctx = kzalloc(sizeof(*ipa3_odl_ctx), GFP_KERNEL);
+	if (!ipa3_odl_ctx) {
+		result = -ENOMEM;
+		goto fail_mem_ctx;
+	}
+
+	odl_cdev = ipa3_odl_ctx->odl_cdev;
+	INIT_LIST_HEAD(&ipa3_odl_ctx->adpl_msg_list);
+	mutex_init(&ipa3_odl_ctx->adpl_msg_lock);
+
+	odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_adpl");
+
+	if (IS_ERR(odl_cdev[loop].class)) {
+		IPAERR("Error: failed to create ipa_adpl class\n");
+		result = -ENODEV;
+		goto create_char_dev0_fail;
+	}
+
+	result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1, "ipa_adpl");
+	if (result) {
+		IPAERR("alloc_chrdev_region error for ipa adpl pipe\n");
+		result = -ENODEV;
+		goto alloc_chrdev0_region_fail;
+	}
+
+	odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL,
+		 odl_cdev[loop].dev_num, ipa3_ctx, "ipa_adpl");
+	if (IS_ERR(odl_cdev[loop].dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev));
+		result = PTR_ERR(odl_cdev[loop].dev);
+		goto device0_create_fail;
+	}
+
+	cdev = &odl_cdev[loop].cdev;
+	cdev_init(cdev, &ipa_adpl_fops);
+	cdev->owner = THIS_MODULE;
+	cdev->ops = &ipa_adpl_fops;
+
+	result = cdev_add(cdev, odl_cdev[loop].dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev0_add_fail;
+	}
+
+	loop++;
+
+	odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_odl_ctl");
+
+	if (IS_ERR(odl_cdev[loop].class)) {
+		IPAERR("Error: failed to create ipa_odl_ctl class\n");
+		result =  -ENODEV;
+		goto create_char_dev1_fail;
+	}
+
+	result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1,
+							"ipa_odl_ctl");
+	if (result) {
+		IPAERR("alloc_chrdev_region error for ipa odl ctl pipe\n");
+		goto alloc_chrdev1_region_fail;
+	}
+
+	odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL,
+		 odl_cdev[loop].dev_num, ipa3_ctx, "ipa_odl_ctl");
+	if (IS_ERR(odl_cdev[loop].dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev));
+		result = PTR_ERR(odl_cdev[loop].dev);
+		goto device1_create_fail;
+	}
+
+	cdev = &odl_cdev[loop].cdev;
+	cdev_init(cdev, &ipa_odl_ctl_fops);
+	cdev->owner = THIS_MODULE;
+	cdev->ops = &ipa_odl_ctl_fops;
+
+	result = cdev_add(cdev, odl_cdev[loop].dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev1_add_fail;
+	}
+
+	ipa3_odl_ctx->odl_state.odl_init = true;
+
+	/* register ipa_pm */
+	result = ipa3_odl_register_pm();
+	if (result) {
+		IPAWANERR("ipa3_odl_register_pm failed, ret: %d\n",
+				result);
+	}
+	return 0;
+cdev1_add_fail:
+	device_destroy(odl_cdev[1].class, odl_cdev[1].dev_num);
+device1_create_fail:
+	unregister_chrdev_region(odl_cdev[1].dev_num, 1);
+alloc_chrdev1_region_fail:
+	class_destroy(odl_cdev[1].class);
+create_char_dev1_fail:
+cdev0_add_fail:
+	device_destroy(odl_cdev[0].class, odl_cdev[0].dev_num);
+device0_create_fail:
+	unregister_chrdev_region(odl_cdev[0].dev_num, 1);
+alloc_chrdev0_region_fail:
+	class_destroy(odl_cdev[0].class);
+create_char_dev0_fail:
+	kfree(ipa3_odl_ctx);
+fail_mem_ctx:
+	return result;
+}
+
+bool ipa3_is_odl_connected(void)
+{
+	return ipa3_odl_ctx->odl_state.odl_connected;
+}

+ 74 - 0
ipa/ipa_v3/ipa_odl.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA3_ODL_H_
+#define _IPA3_ODL_H_
+
+#define IPA_ODL_AGGR_BYTE_LIMIT (15 * 1024)
+#define IPA_ODL_RX_RING_SIZE 192
+#define MAX_QUEUE_TO_ODL 1024
+#define CONFIG_SUCCESS 1
+#define ODL_EP_TYPE_HSUSB 2
+#define ODL_EP_PERIPHERAL_IFACE_ID 3
+
+struct ipa3_odlstats {
+	u32 odl_rx_pkt;
+	u32 odl_tx_diag_pkt;
+	u32 odl_drop_pkt;
+	atomic_t numer_in_queue;
+};
+
+struct odl_state_bit_mask {
+	u32 odl_init:1;
+	u32 odl_open:1;
+	u32 adpl_open:1;
+	u32 aggr_byte_limit_sent:1;
+	u32 odl_ep_setup:1;
+	u32 odl_setup_done_sent:1;
+	u32 odl_ep_info_sent:1;
+	u32 odl_connected:1;
+	u32 odl_disconnected:1;
+	u32:0;
+};
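+
+/*
+ * Typical progression of these bits, as driven by ipa_odl.c:
+ * odl_init (ipa_odl_init) -> adpl_open/odl_open (device opens) ->
+ * odl_ep_setup (ipa3_odl_pipe_open) -> odl_setup_done_sent (ctl read) ->
+ * odl_ep_info_sent (EP-info ioctl) -> odl_connected (modem-config
+ * ioctl); odl_disconnected is set again on pipe cleanup.
+ */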
+
+/**
+ * struct ipa3_odl_char_device_context - IPA ODL character device
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ */
+struct ipa3_odl_char_device_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+};
+
+struct ipa_odl_context {
+	struct ipa3_odl_char_device_context odl_cdev[2];
+	struct list_head adpl_msg_list;
+	struct mutex adpl_msg_lock;
+	struct ipa_sys_connect_params odl_sys_param;
+	u32 odl_client_hdl;
+	struct odl_state_bit_mask odl_state;
+	bool odl_ctl_msg_wq_flag;
+	struct ipa3_odlstats stats;
+	u32 odl_pm_hdl;
+};
+
+struct ipa3_push_msg_odl {
+	void *buff;
+	int len;
+	struct list_head link;
+};
+
+extern struct ipa_odl_context *ipa3_odl_ctx;
+
+int ipa_odl_init(void);
+void ipa3_odl_pipe_cleanup(bool is_ssr);
+int ipa3_odl_pipe_open(void);
+
+#endif /* _IPA3_ODL_H_ */

+ 1431 - 0
ipa/ipa_v3/ipa_pm.c

@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include "ipa_pm.h"
+#include "ipa_i.h"
+
+
+#define IPA_PM_DRV_NAME "ipa_pm"
+
+#define IPA_PM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_DBG_STATE(hdl, name, state) \
+	IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \
+		client_state_to_str[state])
+
+
+#if IPA_PM_MAX_CLIENTS > 32
+#error max clients greater than 32; all bitmask types must be changed
+#endif
+
+/*
+ * struct ipa_pm_exception_list - holds information about an exception
+ * @clients: comma-separated names of the clients in the exception
+ * @pending: number of clients in the exception that have not yet registered
+ * @bitmask: bitmask of the clients in the exception based on handle
+ * @threshold: the threshold values for the exception
+ */
+struct ipa_pm_exception_list {
+	char clients[IPA_PM_MAX_EX_CL];
+	int pending;
+	u32 bitmask;
+	int threshold[IPA_PM_THRESHOLD_MAX];
+};
+
+/*
+ * struct clk_scaling_db - holds information about thresholds and exceptions
+ * @lock: lock the bitmasks and thresholds
+ * @exception_list: pointer to the list of exceptions
+ * @work: work for clock scaling algorithm
+ * @active_client_bitmask: the bits represent handles in the clients array that
+ * contain non-null client
+ * @threshold_size: size of the throughput threshold
+ * @exception_size: size of the exception list
+ * @cur_vote: index of the currently voted clock threshold
+ * @default_threshold: the thresholds used if no exception passes
+ * @current_threshold: the current threshold of the clock plan
+ */
+struct clk_scaling_db {
+	spinlock_t lock;
+	struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX];
+	struct work_struct work;
+	u32 active_client_bitmask;
+	int threshold_size;
+	int exception_size;
+	int cur_vote;
+	int default_threshold[IPA_PM_THRESHOLD_MAX];
+	int *current_threshold;
+};
+
+/*
+ * ipa_pm state names
+ *
+ * Timer free states:
+ * @IPA_PM_DEACTIVATED: client starting state when registered
+ * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called in progress of a client
+ *				   activating
+ * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by work_queue
+ * @IPA_PM_ACTIVATED: client is activated without any timers
+ *
+ * Timer set states:
+ * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivated once the
+ *			timer expires
+ * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while a timer was set, so
+ *			when the timer expires, the client stays activated
+ * @IPA_PM_ACTIVATED_PENDING_RESCHEDULE: the timer is extended because the
+ *			client was deferred-deactivated while a timer was
+ *			still active
+ */
+enum ipa_pm_state {
+	IPA_PM_DEACTIVATED,
+	IPA_PM_DEACTIVATE_IN_PROGRESS,
+	IPA_PM_ACTIVATE_IN_PROGRESS,
+	IPA_PM_ACTIVATED,
+	IPA_PM_ACTIVATED_PENDING_DEACTIVATION,
+	IPA_PM_ACTIVATED_TIMER_SET,
+	IPA_PM_ACTIVATED_PENDING_RESCHEDULE,
+	IPA_PM_STATE_MAX
+};
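+
+/*
+ * Transition summary (derived from the handlers implemented in this file):
+ *
+ * activate:		DEACTIVATED -> ACTIVATED (clock granted at once) or
+ *			DEACTIVATED -> ACTIVATE_IN_PROGRESS (async grant)
+ * activate work:	ACTIVATE_IN_PROGRESS -> ACTIVATED
+ *			DEACTIVATE_IN_PROGRESS -> DEACTIVATED
+ * deferred deact.:	ACTIVATED -> ACTIVATED_PENDING_DEACTIVATION
+ *			ACTIVATE_IN_PROGRESS -> DEACTIVATE_IN_PROGRESS
+ *			TIMER_SET/PENDING_DEACTIVATION -> PENDING_RESCHEDULE
+ * activate again:	PENDING_DEACTIVATION/PENDING_RESCHEDULE -> TIMER_SET
+ * timer expiry:	TIMER_SET -> ACTIVATED
+ *			PENDING_RESCHEDULE -> PENDING_DEACTIVATION (requeued)
+ *			PENDING_DEACTIVATION -> DEACTIVATED
+ */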
+
+#define IPA_PM_STATE_ACTIVE(state) \
+	(state == IPA_PM_ACTIVATED ||\
+		state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\
+		state  == IPA_PM_ACTIVATED_TIMER_SET ||\
+		state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE)
+
+#define IPA_PM_STATE_IN_PROGRESS(state) \
+	(state == IPA_PM_ACTIVATE_IN_PROGRESS \
+		|| state == IPA_PM_DEACTIVATE_IN_PROGRESS)
+
+/*
+ * struct ipa_pm_client - holds information about a specific IPA client
+ * @name: string name of the client
+ * @callback: pointer to the client's callback function
+ * @callback_params: pointer to the client's callback parameters
+ * @state: Activation state of the client
+ * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
+ * @group: the ipa_pm_group the client belongs to
+ * @hdl: handle of the client
+ * @throughput: the throughput of the client for clock scaling
+ * @state_lock: spinlock to lock the pm_states
+ * @activate_work: work for activate (blocking case)
+ * @deactivate_work: delayed work for the deferred_deactivate function
+ * @complete: generic wait-for-completion handler
+ * @wlock: wake source to prevent AP suspend
+ */
+struct ipa_pm_client {
+	char name[IPA_PM_MAX_EX_CL];
+	void (*callback)(void *user_data, enum ipa_pm_cb_event);
+	void *callback_params;
+	enum ipa_pm_state state;
+	bool skip_clk_vote;
+	int group;
+	int hdl;
+	int throughput;
+	spinlock_t state_lock;
+	struct work_struct activate_work;
+	struct delayed_work deactivate_work;
+	struct completion complete;
+	struct wakeup_source *wlock;
+};
+
+/*
+ * struct ipa_pm_ctx - global ctx that will hold the client arrays and tput info
+ * @clients: array of the clients with the handle as its index
+ * @clients_by_pipe: array of the clients with the endpoint as the index
+ * @wq: work queue for deferred deactivate, activate, and clk_scaling work
+ * @clk_scaling: clock scaling database
+ * @client_mutex: global mutex to lock the client arrays
+ * @aggregated_tput: aggregated tput value of all valid activated clients
+ * @group_tput: combined throughput for the groups
+ */
+struct ipa_pm_ctx {
+	struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS];
+	struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES];
+	struct workqueue_struct *wq;
+	struct clk_scaling_db clk_scaling;
+	struct mutex client_mutex;
+	int aggregated_tput;
+	int group_tput[IPA_PM_GROUP_MAX];
+};
+
+static struct ipa_pm_ctx *ipa_pm_ctx;
+
+static const char *client_state_to_str[IPA_PM_STATE_MAX] = {
+	__stringify(IPA_PM_DEACTIVATED),
+	__stringify(IPA_PM_DEACTIVATE_IN_PROGRESS),
+	__stringify(IPA_PM_ACTIVATE_IN_PROGRESS),
+	__stringify(IPA_PM_ACTIVATED),
+	__stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION),
+	__stringify(IPA_PM_ACTIVATED_TIMER_SET),
+	__stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE),
+};
+
+static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = {
+	__stringify(IPA_PM_GROUP_DEFAULT),
+	__stringify(IPA_PM_GROUP_APPS),
+	__stringify(IPA_PM_GROUP_MODEM),
+};
+
+/**
+ * pop_max_from_array() - pop the max and move the last element to where the
+ * max was popped
+ * @arr: array to be searched for max
+ * @n: size of the array
+ *
+ * Returns: max value of the array
+ */
+static int pop_max_from_array(int *arr, int *n)
+{
+	int i;
+	int max, max_idx;
+
+	max_idx = *n - 1;
+	max = 0;
+
+	if (*n == 0)
+		return 0;
+
+	for (i = 0; i < *n; i++) {
+		if (arr[i] > max) {
+			max = arr[i];
+			max_idx = i;
+		}
+	}
+	(*n)--;
+	arr[max_idx] = arr[*n];
+
+	return max;
+}
+
+/**
+ * calculate_throughput() - calculate the aggregated throughput
+ * based on active clients
+ *
+ * Returns: aggregated tput value
+ */
+static int calculate_throughput(void)
+{
+	int client_tput[IPA_PM_MAX_CLIENTS] = { 0 };
+	bool group_voted[IPA_PM_GROUP_MAX] = { false };
+	int i, n;
+	int max, second_max, aggregated_tput;
+	struct ipa_pm_client *client;
+
+	/* Create a basic array to hold throughputs */
+	for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
+			/* default case */
+			if (client->group == IPA_PM_GROUP_DEFAULT) {
+				client_tput[n++] = client->throughput;
+			} else if (!group_voted[client->group]) {
+				client_tput[n++] = ipa_pm_ctx->group_tput
+					[client->group];
+				group_voted[client->group] = true;
+			}
+		}
+	}
+	/* the array will only use n+1 spots; n will be the last index used */
+
+	aggregated_tput = 0;
+
+	/**
+	 * throughput algorithm:
+	 * 1) pop the max and second_max
+	 * 2) add the 2nd max to aggregated tput
+	 * 3) insert the value of max - 2nd max
+	 * 4) repeat until array is of size 1
+	 */
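+	/*
+	 * Worked example (hypothetical numbers): client_tput = {700, 300, 200}
+	 * iteration 1: max = 700, 2nd max = 300 -> aggregated = 300,
+	 *		remaining array = {200, 400}
+	 * iteration 2: max = 400, 2nd max = 200 -> aggregated = 500,
+	 *		remaining array = {200}
+	 * final aggregated_tput = 500
+	 */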
+	while (n > 1) {
+		max = pop_max_from_array(client_tput, &n);
+		second_max = pop_max_from_array(client_tput, &n);
+		client_tput[n++] = max - second_max;
+		aggregated_tput += second_max;
+	}
+
+	IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput);
+
+	return aggregated_tput;
+}
+
+/**
+ * deactivate_client() - turn off the bit in the active client bitmask based on
+ * the handle passed in
+ * @hdl: The index of the client to be deactivated
+ */
+static void deactivate_client(u32 hdl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl);
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+	IPA_PM_DBG_LOW("active bitmask: %x\n",
+		ipa_pm_ctx->clk_scaling.active_client_bitmask);
+}
+
+/**
+ * activate_client() - turn on the bit in the active client bitmask based on
+ * the handle passed in
+ * @hdl: The index of the client to be activated
+ */
+static void activate_client(u32 hdl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl);
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+	IPA_PM_DBG_LOW("active bitmask: %x\n",
+		ipa_pm_ctx->clk_scaling.active_client_bitmask);
+}
+
+/**
+ * set_current_threshold() - select the current threshold
+ *
+ * Sets current_threshold to the threshold of the first exception that
+ * applies, or to the default threshold if none apply
+ */
+static void set_current_threshold(void)
+{
+	int i;
+	struct clk_scaling_db *clk;
+	struct ipa_pm_exception_list *exception;
+	unsigned long flags;
+
+	clk = &ipa_pm_ctx->clk_scaling;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	for (i = 0; i < clk->exception_size; i++) {
+		exception = &clk->exception_list[i];
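+		/*
+		 * an exception applies once all of its clients have
+		 * registered (pending == 0) and are currently active
+		 */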
+		if (exception->pending == 0 && (exception->bitmask
+			& ~clk->active_client_bitmask) == 0) {
+			spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock,
+				 flags);
+			clk->current_threshold = exception->threshold;
+			IPA_PM_DBG("Exception %d set\n", i);
+			return;
+		}
+	}
+	clk->current_threshold = clk->default_threshold;
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+}
+
+/**
+ * do_clk_scaling() - set the clock based on the activated clients
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int do_clk_scaling(void)
+{
+	int i, tput;
+	int new_th_idx = 1;
+	struct clk_scaling_db *clk_scaling;
+
+	if (atomic_read(&ipa3_ctx->ipa_clk_vote) == 0) {
+		IPA_PM_DBG("IPA clock is gated\n");
+		return 0;
+	}
+
+	clk_scaling = &ipa_pm_ctx->clk_scaling;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	IPA_PM_DBG_LOW("clock scaling started\n");
+	tput = calculate_throughput();
+	ipa_pm_ctx->aggregated_tput = tput;
+	set_current_threshold();
+
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	for (i = 0; i < clk_scaling->threshold_size; i++) {
+		if (tput >= clk_scaling->current_threshold[i])
+			new_th_idx++;
+	}
+
+	IPA_PM_DBG_LOW("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+
+
+	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
+		ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx;
+		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
+	}
+
+	IPA_PM_DBG_LOW("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+
+	return 0;
+}
+
+/**
+ * clock_scaling_func() - set the clock on a work queue
+ */
+static void clock_scaling_func(struct work_struct *work)
+{
+	do_clk_scaling();
+}
+
+/**
+ * activate_work_func - activate a client and vote for clock on a work queue
+ */
+static void activate_work_func(struct work_struct *work)
+{
+	struct ipa_pm_client *client;
+	bool dec_clk = false;
+	unsigned long flags;
+
+	client = container_of(work, struct ipa_pm_client, activate_work);
+	if (!client->skip_clk_vote) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
+		if (client->group == IPA_PM_GROUP_APPS)
+			__pm_stay_awake(client->wlock);
+	}
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) {
+		client->state = IPA_PM_ACTIVATED;
+	} else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) {
+		client->state = IPA_PM_DEACTIVATED;
+		dec_clk = true;
+	} else {
+		IPA_PM_ERR("unexpected state %d\n", client->state);
+		WARN_ON(1);
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	complete_all(&client->complete);
+
+	if (dec_clk) {
+		if (!client->skip_clk_vote) {
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+			if (client->group == IPA_PM_GROUP_APPS)
+				__pm_relax(client->wlock);
+		}
+
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		return;
+	}
+
+	activate_client(client->hdl);
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	if (client->callback) {
+		client->callback(client->callback_params,
+			IPA_PM_CLIENT_ACTIVATED);
+	} else {
+		IPA_PM_ERR("client has no callback\n");
+		WARN_ON(1);
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	do_clk_scaling();
+}
+
+/**
+ * delayed_deferred_deactivate_work_func - deferred deactivate on a work queue
+ */
+static void delayed_deferred_deactivate_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa_pm_client *client;
+	unsigned long flags;
+	unsigned long delay;
+
+	dwork = container_of(work, struct delayed_work, work);
+	client = container_of(dwork, struct ipa_pm_client, deactivate_work);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	switch (client->state) {
+	case IPA_PM_ACTIVATED_TIMER_SET:
+		client->state = IPA_PM_ACTIVATED;
+		goto bail;
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+		delay = IPA_PM_DEFERRED_TIMEOUT;
+		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+			delay *= 5;
+
+		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
+			msecs_to_jiffies(delay));
+		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
+		goto bail;
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_DEACTIVATED;
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		if (!client->skip_clk_vote) {
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+			if (client->group == IPA_PM_GROUP_APPS)
+				__pm_relax(client->wlock);
+		}
+
+		deactivate_client(client->hdl);
+		do_clk_scaling();
+		return;
+	default:
+		IPA_PM_ERR("unexpected state %d\n", client->state);
+		WARN_ON(1);
+		goto bail;
+	}
+
+bail:
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+}
+
+static int find_next_open_array_element(const char *name)
+{
+	int i, n;
+
+	n = -ENOBUFS;
+
+	/* 0 is not a valid handle */
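+	/* scan from the top down so n ends up as the lowest free handle */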
+	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
+		if (ipa_pm_ctx->clients[i] == NULL) {
+			n = i;
+			continue;
+		}
+
+		if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name))
+			if (!strcmp(name, ipa_pm_ctx->clients[i]->name))
+				return -EEXIST;
+	}
+	return n;
+}
+
+/**
+ * add_client_to_exception_list() - add client to the exception list and
+ * update pending if necessary
+ * @hdl: index of the IPA client
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int add_client_to_exception_list(u32 hdl)
+{
+	int i, len = 0;
+	struct ipa_pm_exception_list *exception;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	len = strlen(ipa_pm_ctx->clients[hdl]->name);
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
+			len) && (strlen(exception->clients)
+			== len)) {
+			exception->pending--;
+			IPA_PM_DBG("Pending: %d\n",
+			exception->pending);
+
+			if (exception->pending < 0) {
+				WARN_ON(1);
+				exception->pending = 0;
+				mutex_unlock(&ipa_pm_ctx->client_mutex);
+				return -EPERM;
+			}
+			exception->bitmask |= (1 << hdl);
+		}
+	}
+	IPA_PM_DBG("%s added to exception list\n",
+		ipa_pm_ctx->clients[hdl]->name);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return 0;
+}
+
+/**
+ * remove_client_from_exception_list() - remove client from the exception list
+ * update pending if necessary
+ * @hdl: index of the IPA client
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int remove_client_from_exception_list(u32 hdl)
+{
+	int i;
+	struct ipa_pm_exception_list *exception;
+
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (exception->bitmask & (1 << hdl)) {
+			exception->pending++;
+			IPA_PM_DBG("Pending: %d\n",
+			exception->pending);
+			exception->bitmask &= ~(1 << hdl);
+		}
+	}
+	IPA_PM_DBG("Client %d removed from exception list\n", hdl);
+
+	return 0;
+}
+
+/**
+ * ipa_pm_init() - initialize IPA PM Components
+ * @params: parameters needed to fill exceptions and thresholds
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_init(struct ipa_pm_init_params *params)
+{
+	int i, j;
+	struct clk_scaling_db *clk_scaling;
+
+	if (params == NULL) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	if (params->threshold_size <= 0
+		|| params->threshold_size > IPA_PM_THRESHOLD_MAX) {
+		IPA_PM_ERR("Invalid threshold size\n");
+		return -EINVAL;
+	}
+
+	if (params->exception_size < 0
+		|| params->exception_size > IPA_PM_EXCEPTION_MAX) {
+		IPA_PM_ERR("Invalid exception size\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM initialization started\n");
+
+	if (ipa_pm_ctx != NULL) {
+		IPA_PM_ERR("Already initialized\n");
+		return -EPERM;
+	}
+
+
+	ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL);
+	if (!ipa_pm_ctx) {
+		IPA_PM_ERR(":kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate");
+	if (!ipa_pm_ctx->wq) {
+		IPA_PM_ERR("create workqueue failed\n");
+		kfree(ipa_pm_ctx);
+		return -ENOMEM;
+	}
+
+	mutex_init(&ipa_pm_ctx->client_mutex);
+
+	/* Populate and init locks in clk_scaling_db */
+	clk_scaling = &ipa_pm_ctx->clk_scaling;
+	spin_lock_init(&clk_scaling->lock);
+	clk_scaling->threshold_size = params->threshold_size;
+	clk_scaling->exception_size = params->exception_size;
+	INIT_WORK(&clk_scaling->work, clock_scaling_func);
+
+	for (i = 0; i < params->threshold_size; i++)
+		clk_scaling->default_threshold[i] =
+			params->default_threshold[i];
+
+	/* Populate exception list*/
+	for (i = 0; i < params->exception_size; i++) {
+		strlcpy(clk_scaling->exception_list[i].clients,
+			params->exceptions[i].usecase, IPA_PM_MAX_EX_CL);
+		IPA_PM_DBG("Usecase: %s\n", params->exceptions[i].usecase);
+
+		/* Parse the commas to count the size of the clients */
+		for (j = 0; j < IPA_PM_MAX_EX_CL &&
+			clk_scaling->exception_list[i].clients[j]; j++) {
+			if (clk_scaling->exception_list[i].clients[j] == ',')
+				clk_scaling->exception_list[i].pending++;
+		}
+
+		/* for the first client */
+		clk_scaling->exception_list[i].pending++;
+		IPA_PM_DBG("Pending: %d\n",
+			clk_scaling->exception_list[i].pending);
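+		/*
+		 * e.g. a hypothetical usecase string "USB,WLAN" has one comma
+		 * plus the first client, so pending ends up as 2
+		 */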
+
+		/* populate the threshold */
+		for (j = 0; j < params->threshold_size; j++) {
+			clk_scaling->exception_list[i].threshold[j]
+			= params->exceptions[i].threshold[j];
+		}
+
+	}
+	IPA_PM_DBG("initialization success\n");
+
+	return 0;
+}
+
+int ipa_pm_destroy(void)
+{
+	IPA_PM_DBG("IPA PM destroy started\n");
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("Already destroyed\n");
+		return -EPERM;
+	}
+
+	destroy_workqueue(ipa_pm_ctx->wq);
+
+	kfree(ipa_pm_ctx);
+	ipa_pm_ctx = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa_pm_register() - register an IPA PM client with the PM
+ * @register_params: params for a client like throughput, callback, etc.
+ * @hdl: int pointer that will be used as an index to access the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: *hdl is replaced with the client index or -EEXIST if
+ * client is already registered
+ */
+int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
+{
+	struct ipa_pm_client *client;
+	int elem;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (params == NULL || hdl == NULL || params->name == NULL) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM registering client\n");
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	elem = find_next_open_array_element(params->name);
+	*hdl = elem;
+	if (elem < 0 || elem >= IPA_PM_MAX_CLIENTS) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("client already registered or full array elem=%d\n",
+			elem);
+		return elem;
+	}
+
+	ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof
+		(struct ipa_pm_client), GFP_KERNEL);
+	if (!ipa_pm_ctx->clients[*hdl]) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR(":kzalloc err.\n");
+		return -ENOMEM;
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	client = ipa_pm_ctx->clients[*hdl];
+
+	spin_lock_init(&client->state_lock);
+
+	INIT_DELAYED_WORK(&client->deactivate_work,
+		delayed_deferred_deactivate_work_func);
+
+	INIT_WORK(&client->activate_work, activate_work_func);
+
+	/* populate fields */
+	strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL);
+	client->callback = params->callback;
+	client->callback_params = params->user_data;
+	client->group = params->group;
+	client->hdl = *hdl;
+	client->skip_clk_vote = params->skip_clk_vote;
+	client->wlock = wakeup_source_register(NULL, client->name);
+	if (!client->wlock) {
+		ipa_pm_deregister(*hdl);
+		IPA_PM_ERR("IPA wakeup source register failed %s\n",
+			client->name);
+		return -ENOMEM;
+	}
+
+	init_completion(&client->complete);
+
+	/* add client to exception list */
+	if (add_client_to_exception_list(*hdl)) {
+		ipa_pm_deregister(*hdl);
+		IPA_PM_ERR("Fail to add client to exception_list\n");
+		return -EPERM;
+	}
+
+	IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_register);
+
+/**
+ * ipa_pm_deregister() - deregister IPA client from the PM
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deregister(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	int i;
+	unsigned long flags;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	if (ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Client is Null\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM deregistering client\n");
+
+	client = ipa_pm_ctx->clients[hdl];
+	spin_lock_irqsave(&client->state_lock, flags);
+	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		wait_for_completion(&client->complete);
+		spin_lock_irqsave(&client->state_lock, flags);
+	}
+
+	if (IPA_PM_STATE_ACTIVE(client->state)) {
+		IPA_PM_DBG("Activated clients cannot be deregistered");
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	/* nullify pointers in pipe array */
+	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
+		if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
+			ipa_pm_ctx->clients_by_pipe[i] = NULL;
+	}
+	wakeup_source_unregister(client->wlock);
+	kfree(client);
+	ipa_pm_ctx->clients[hdl] = NULL;
+
+	remove_client_from_exception_list(hdl);
+	IPA_PM_DBG("IPA PM client %d deregistered\n", hdl);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_deregister);
+
+/**
+ * ipa_pm_associate_ipa_cons_to_client() - add a mapping from a pipe to an
+ * ipa client
+ * @hdl: index of the client to be mapped
+ * @consumer: the pipe/consumer to be mapped to the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: multiple pipes are allowed to be mapped to a single client
+ */
+int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
+{
+	int idx;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
+		consumer >= IPA_CLIENT_MAX) {
+		IPA_PM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	if (ipa_pm_ctx->clients[hdl] == NULL) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("Client is NULL\n");
+		return -EPERM;
+	}
+
+	idx = ipa_get_ep_mapping(consumer);
+
+	if (idx < 0) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_DBG("Pipe is not used\n");
+		return 0;
+	}
+
+	IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl);
+
+	if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("Pipe is already mapped\n");
+		return -EPERM;
+	}
+	ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl];
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_associate_ipa_cons_to_client);
+
+static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
+{
+	struct ipa_active_client_logging_info log_info;
+	int result = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+		if (sync) {
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			wait_for_completion(&client->complete);
+			spin_lock_irqsave(&client->state_lock, flags);
+		} else {
+			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			return -EINPROGRESS;
+		}
+	}
+
+	switch (client->state) {
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_ACTIVATED_TIMER_SET;
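+		/* fall through */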
+	case IPA_PM_ACTIVATED:
+	case IPA_PM_ACTIVATED_TIMER_SET:
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	case IPA_PM_DEACTIVATED:
+		break;
+	default:
+		IPA_PM_ERR("Invalid State\n");
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return -EPERM;
+	}
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name);
+	if (!client->skip_clk_vote) {
+		if (sync) {
+			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
+			spin_lock_irqsave(&client->state_lock, flags);
+		} else
+			result = ipa3_inc_client_enable_clks_no_block
+				 (&log_info);
+	}
+
+	/* we got the clocks */
+	if (result == 0) {
+		client->state = IPA_PM_ACTIVATED;
+		if (client->group == IPA_PM_GROUP_APPS)
+			__pm_stay_awake(client->wlock);
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		activate_client(client->hdl);
+		if (sync)
+			do_clk_scaling();
+		else
+			queue_work(ipa_pm_ctx->wq,
+				   &ipa_pm_ctx->clk_scaling.work);
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		return 0;
+	}
+
+	client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+	reinit_completion(&client->complete);
+	queue_work(ipa_pm_ctx->wq, &client->activate_work);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	return -EINPROGRESS;
+}
+
+/**
+ * ipa_pm_activate(): activate ipa client to vote for the IPA clock. Can be
+ * called from atomic context; returns -EINPROGRESS if the activation cannot
+ * be completed synchronously
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, -EINPROGRESS if operation cannot be done synchronously
+ * and other negatives on failure
+ */
+int ipa_pm_activate(u32 hdl)
+{
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false);
+}
+EXPORT_SYMBOL(ipa_pm_activate);
+
+/**
+ * ipa_pm_activate_sync(): activate ipa client to vote for the IPA clock
+ * synchronously. Cannot be called from an atomic context.
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_activate_sync(u32 hdl)
+{
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true);
+}
+EXPORT_SYMBOL(ipa_pm_activate_sync);
+
+/**
+ * ipa_pm_deferred_deactivate(): schedule a timer to deactivate the client
+ * and remove the clock vote. Can be called from atomic context
+ * (asynchronously)
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deferred_deactivate(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+	unsigned long delay;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	client = ipa_pm_ctx->clients[hdl];
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	switch (client->state) {
+	case IPA_PM_ACTIVATE_IN_PROGRESS:
+		client->state = IPA_PM_DEACTIVATE_IN_PROGRESS;
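+		/* fall through */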
+	case IPA_PM_DEACTIVATED:
+		IPA_PM_DBG_STATE(hdl, client->name, client->state);
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	case IPA_PM_ACTIVATED:
+		delay = IPA_PM_DEFERRED_TIMEOUT;
+		if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+			ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
+			delay *= 5;
+
+		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
+		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
+			msecs_to_jiffies(delay));
+		break;
+	case IPA_PM_ACTIVATED_TIMER_SET:
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE;
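+		/* fall through */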
+	case IPA_PM_DEACTIVATE_IN_PROGRESS:
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+		break;
+	case IPA_PM_STATE_MAX:
+	default:
+		IPA_PM_ERR("Bad State");
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return -EINVAL;
+	}
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_deferred_deactivate);
+
+/**
+ * ipa_pm_deactivate_all_deferred(): cancel the deferred deactivation timers
+ * and immediately remove the IPA clock votes
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deactivate_all_deferred(void)
+{
+	int i;
+	bool run_algorithm = false;
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+
+		if (client == NULL)
+			continue;
+
+		cancel_delayed_work_sync(&client->deactivate_work);
+
+		if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+			wait_for_completion(&client->complete);
+			continue;
+		}
+
+		spin_lock_irqsave(&client->state_lock, flags);
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+		if (client->state == IPA_PM_ACTIVATED_TIMER_SET) {
+			client->state = IPA_PM_ACTIVATED;
+			IPA_PM_DBG_STATE(client->hdl, client->name,
+				client->state);
+			spin_unlock_irqrestore(&client->state_lock, flags);
+		} else if (client->state ==
+				IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
+			client->state ==
+				IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+			run_algorithm = true;
+			client->state = IPA_PM_DEACTIVATED;
+			IPA_PM_DBG_STATE(client->hdl, client->name,
+				client->state);
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			if (!client->skip_clk_vote) {
+				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+				if (client->group == IPA_PM_GROUP_APPS)
+					__pm_relax(client->wlock);
+			}
+			deactivate_client(client->hdl);
+		} else /* if activated or deactivated, we do nothing */
+			spin_unlock_irqrestore(&client->state_lock, flags);
+	}
+
+	if (run_algorithm)
+		do_clk_scaling();
+
+	return 0;
+}
+
+/**
+ * ipa_pm_deactivate_sync(): deactivate ipa client and remove the clock vote.
+ * Cannot be called from atomic context.
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deactivate_sync(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+	client = ipa_pm_ctx->clients[hdl];
+
+	cancel_delayed_work_sync(&client->deactivate_work);
+
+	if (IPA_PM_STATE_IN_PROGRESS(client->state))
+		wait_for_completion(&client->complete);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+
+	if (client->state == IPA_PM_DEACTIVATED) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	/* else case (Deactivates all Activated cases)*/
+	if (!client->skip_clk_vote) {
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+		if (client->group == IPA_PM_GROUP_APPS)
+			__pm_relax(client->wlock);
+	}
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	client->state = IPA_PM_DEACTIVATED;
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+	deactivate_client(hdl);
+	do_clk_scaling();
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_deactivate_sync);
+
+/**
+ * ipa_pm_handle_suspend(): calls the callbacks of suspended clients to wake up
+ * @pipe_bitmask: the bits represent the indexes of the clients to be woken up
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_handle_suspend(u32 pipe_bitmask)
+{
+	int i;
+	struct ipa_pm_client *client;
+	bool client_notified[IPA_PM_MAX_CLIENTS] = { false };
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG_LOW("bitmask: %d\n", pipe_bitmask);
+
+	if (pipe_bitmask == 0)
+		return 0;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
+		if (pipe_bitmask & (1 << i)) {
+			client = ipa_pm_ctx->clients_by_pipe[i];
+			if (client && !client_notified[client->hdl]) {
+				if (client->callback) {
+					client->callback(client->callback_params
+						, IPA_PM_REQUEST_WAKEUP);
+					client_notified[client->hdl] = true;
+				} else {
+					IPA_PM_ERR("client has no callback\n");
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+	return 0;
+}
+
+/**
+ * ipa_pm_set_throughput(): Adds/changes the throughput requirement to IPA PM
+ * to be used for clock scaling
+ * @hdl: index of the client in the array
+ * @throughput: the new throughput value to be set for that client
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_set_throughput(u32 hdl, int throughput)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("PM_ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
+		|| throughput < 0) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+	client = ipa_pm_ctx->clients[hdl];
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		IPA_PM_DBG_LOW("Old throughput: %d\n",  client->throughput);
+	else
+		IPA_PM_DBG_LOW("old Group %d throughput: %d\n",
+			client->group, ipa_pm_ctx->group_tput[client->group]);
+
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		client->throughput = throughput;
+	else
+		ipa_pm_ctx->group_tput[client->group] = throughput;
+
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		IPA_PM_DBG_LOW("New throughput: %d\n",  client->throughput);
+	else
+		IPA_PM_DBG_LOW("New Group %d throughput: %d\n",
+			client->group, ipa_pm_ctx->group_tput[client->group]);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
+			IPA_PM_GROUP_DEFAULT)) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		do_clk_scaling();
+		return 0;
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_pm_set_throughput);
+
+void ipa_pm_set_clock_index(int index)
+{
+	if (ipa_pm_ctx && index >= 0)
+		ipa_pm_ctx->clk_scaling.cur_vote = index;
+
+	IPA_PM_DBG("Setting pm clock vote to %d\n", index);
+}
+
+/**
+ * ipa_pm_stat() - print PM stat
+ * @buf: [in] The user buff used to print
+ * @size: [in] The size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a picture of the clients in the PM and the throughput, threshold and cur vote
+ */
+int ipa_pm_stat(char *buf, int size)
+{
+	struct ipa_pm_client *client;
+	struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling;
+	int i, j, tput, cnt = 0, result = 0;
+	unsigned long flags;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: [");
+	cnt += result;
+
+	for (i = 0; i < clk->threshold_size; i++) {
+		result = scnprintf(buf + cnt, size - cnt,
+			"%d, ", clk->current_threshold[i]);
+		cnt += result;
+	}
+
+	result = scnprintf(buf + cnt, size - cnt, "\b\b]\n");
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"Aggregated tput: %d, Cur vote: %d",
+		ipa_pm_ctx->aggregated_tput, clk->cur_vote);
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n");
+	cnt += result;
+
+
+	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+
+		if (client == NULL)
+			continue;
+
+		spin_lock_irqsave(&client->state_lock, flags);
+		if (client->group == IPA_PM_GROUP_DEFAULT)
+			tput = client->throughput;
+		else
+			tput = ipa_pm_ctx->group_tput[client->group];
+
+		result = scnprintf(buf + cnt, size - cnt,
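+	/* cur_vote is 1-based: each threshold that tput meets bumps the index */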
+		"Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ",
+			i, client->name, client_state_to_str[client->state],
+			ipa_pm_group_to_str[client->group], tput);
+		cnt += result;
+
+		for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) {
+			if (ipa_pm_ctx->clients_by_pipe[j] == client) {
+				result = scnprintf(buf + cnt, size - cnt,
+					"%d, ", j);
+				cnt += result;
+			}
+		}
+
+		result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n");
+		cnt += result;
+		spin_unlock_irqrestore(&client->state_lock, flags);
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return cnt;
+}
+
+/**
+ * ipa_pm_exceptions_stat() - print PM exceptions stat
+ * @buf: [in] The user buff used to print
+ * @size: [in] The size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a full picture of the exceptions in the PM
+ */
+int ipa_pm_exceptions_stat(char *buf, int size)
+{
+	int i, j, cnt = 0, result = 0;
+	struct ipa_pm_exception_list *exception;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	result = scnprintf(buf + cnt, size - cnt, "\n");
+	cnt += result;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (exception == NULL) {
+			result = scnprintf(buf + cnt, size - cnt,
+			"Exception %d is NULL\n\n", i);
+			cnt += result;
+			continue;
+		}
+
+		result = scnprintf(buf + cnt, size - cnt,
+			"Exception %d: %s\nPending: %d Bitmask: %d Threshold: ["
+			, i, exception->clients, exception->pending,
+			exception->bitmask);
+		cnt += result;
+		for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) {
+			result = scnprintf(buf + cnt, size - cnt,
+				"%d, ", exception->threshold[j]);
+			cnt += result;
+		}
+		result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n");
+		cnt += result;
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return cnt;
+}

+ 181 - 0
ipa/ipa_v3/ipa_pm.h

@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_PM_H_
+#define _IPA_PM_H_
+
+#include <linux/msm_ipa.h>
+
+/* internal to ipa */
+#define IPA_PM_MAX_CLIENTS 32 /* actual max is 31 since handle 0 is not used */
+#define IPA_PM_MAX_EX_CL 64
+#define IPA_PM_THRESHOLD_MAX 5
+#define IPA_PM_EXCEPTION_MAX 5
+#define IPA_PM_DEFERRED_TIMEOUT 10
+
+/*
+ * ipa_pm group names
+ *
+ * Default stands for individual clients, while the other groups share one
+ * throughput value. Some groups also have special behavior; e.g. modem does
+ * not vote for clock but is accounted for in clock scaling while activated
+ */
+enum ipa_pm_group {
+	IPA_PM_GROUP_DEFAULT,
+	IPA_PM_GROUP_APPS,
+	IPA_PM_GROUP_MODEM,
+	IPA_PM_GROUP_MAX,
+};
+
+/*
+ * ipa_pm_cb_event
+ *
+ * specifies what kind of callback is being called.
+ * IPA_PM_CLIENT_ACTIVATED: the client has completed asynchronous activation
+ * IPA_PM_REQUEST_WAKEUP: wake up the client after it has been suspended
+ */
+enum ipa_pm_cb_event {
+	IPA_PM_CLIENT_ACTIVATED,
+	IPA_PM_REQUEST_WAKEUP,
+	IPA_PM_CB_EVENT_MAX,
+};
+
+/*
+ * struct ipa_pm_exception - clients included in exception and its threshold
+ * @usecase: comma separated client names
+ * @threshold: the threshold values for the exception
+ */
+struct ipa_pm_exception {
+	const char *usecase;
+	int threshold[IPA_PM_THRESHOLD_MAX];
+};
+
+/*
+ * struct ipa_pm_init_params - parameters needed for initializing the pm
+ * @default_threshold: the thresholds used if no exception passes
+ * @threshold_size: size of the threshold array
+ * @exceptions: list of exceptions for the pm
+ * @exception_size: size of the exception_list
+ */
+struct ipa_pm_init_params {
+	int default_threshold[IPA_PM_THRESHOLD_MAX];
+	int threshold_size;
+	struct ipa_pm_exception exceptions[IPA_PM_EXCEPTION_MAX];
+	int exception_size;
+};
+
+/*
+ * struct ipa_pm_register_params - parameters needed to register a client
+ * @name: name of the client
+ * @callback: pointer to the client's callback function
+ * @user_data: pointer to the client's callback parameters
+ * @group: group number of the client
+ * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
+ */
+struct ipa_pm_register_params {
+	const char *name;
+	void (*callback)(void *user_data, enum ipa_pm_cb_event);
+	void *user_data;
+	enum ipa_pm_group group;
+	bool skip_clk_vote;
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl);
+int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer);
+int ipa_pm_activate(u32 hdl);
+int ipa_pm_activate_sync(u32 hdl);
+int ipa_pm_deferred_deactivate(u32 hdl);
+int ipa_pm_deactivate_sync(u32 hdl);
+int ipa_pm_set_throughput(u32 hdl, int throughput);
+int ipa_pm_deregister(u32 hdl);
+
+/* IPA Internal Functions */
+int ipa_pm_init(struct ipa_pm_init_params *params);
+int ipa_pm_destroy(void);
+int ipa_pm_handle_suspend(u32 pipe_bitmask);
+int ipa_pm_deactivate_all_deferred(void);
+int ipa_pm_stat(char *buf, int size);
+int ipa_pm_exceptions_stat(char *buf, int size);
+void ipa_pm_set_clock_index(int index);
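+
+/*
+ * Typical client flow (illustrative sketch only; the client name, callback,
+ * private data and consumer below are hypothetical, not part of this API):
+ *
+ *	struct ipa_pm_register_params reg = {
+ *		.name = "example_client",
+ *		.callback = example_cb,
+ *		.user_data = priv,
+ *		.group = IPA_PM_GROUP_DEFAULT,
+ *		.skip_clk_vote = false,
+ *	};
+ *	u32 hdl;
+ *
+ *	if (!ipa_pm_register(&reg, &hdl)) {
+ *		ipa_pm_associate_ipa_cons_to_client(hdl, IPA_CLIENT_USB_CONS);
+ *		ipa_pm_set_throughput(hdl, 800);
+ *		ipa_pm_activate_sync(hdl);
+ *		... traffic flows ...
+ *		ipa_pm_deferred_deactivate(hdl);
+ *		... on teardown ...
+ *		ipa_pm_deactivate_sync(hdl);
+ *		ipa_pm_deregister(hdl);
+ *	}
+ */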
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+static inline int ipa_pm_register(
+	struct ipa_pm_register_params *params, u32 *hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_associate_ipa_cons_to_client(
+	u32 hdl, enum ipa_client_type consumer)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_activate(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_activate_sync(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_deferred_deactivate(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_deactivate_sync(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_set_throughput(u32 hdl, int throughput)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_deregister(u32 hdl)
+{
+	return -EPERM;
+}
+
+/* IPA Internal Functions */
+static inline int ipa_pm_init(struct ipa_pm_init_params *params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_destroy(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_handle_suspend(u32 pipe_bitmask)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_deactivate_all_deferred(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_stat(char *buf, int size)
+{
+	return -EPERM;
+}
+
+static inline int ipa_pm_exceptions_stat(char *buf, int size)
+{
+	return -EPERM;
+}
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+#endif /* _IPA_PM_H_ */

+ 2262 - 0
ipa/ipa_v3/ipa_qmi_service.c

@@ -0,0 +1,2262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+#include "ipa_mhi_proxy.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+#define QMI_MHI_SEND_REQ_TIMEOUT_MS 1000
+
+#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000
+
+static struct qmi_handle *ipa3_svc_handle;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin;
+static struct work_struct ipa3_qmi_service_init_work;
+static uint32_t ipa_wan_platform;
+struct ipa3_qmi_context *ipa3_qmi_ctx;
+static bool workqueues_stopped;
+static bool ipa3_modem_init_cmplt;
+static bool first_time_handshake;
+static bool send_qmi_init_q6;
+struct mutex ipa3_qmi_lock;
+struct ipa_msg_desc {
+	uint16_t msg_id;
+	int max_msg_len;
+	struct qmi_elem_info *ei_array;
+};
+
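+/*
+ * Aggregation parameters advertised to the modem for each MHI-prime
+ * endpoint type (DPL producer plus the tethered and tethered-RMNET
+ * producer/consumer pairs)
+ */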
+static struct ipa_mhi_prime_aggr_info_req_msg_v01 aggr_req = {
+	.aggr_info_valid = 1,
+	.aggr_info_len = 5,
+	.aggr_info[0] = {
+		.ic_type = DATA_IC_TYPE_MHI_PRIME_V01,
+		.ep_type = DATA_EP_DESC_TYPE_DPL_PROD_V01,
+		.bytes_count = 16,
+	},
+	.aggr_info[1] = {
+		.ic_type = DATA_IC_TYPE_MHI_PRIME_V01,
+		.ep_type = DATA_EP_DESC_TYPE_TETH_CONS_V01,
+		.bytes_count = 24,
+		.aggr_type = DATA_AGGR_TYPE_QMAPv5_V01,
+	},
+	.aggr_info[2] = {
+		.ic_type = DATA_IC_TYPE_MHI_PRIME_V01,
+		.ep_type = DATA_EP_DESC_TYPE_TETH_PROD_V01,
+		.bytes_count = 16,
+		.aggr_type = DATA_AGGR_TYPE_QMAPv5_V01,
+	},
+	.aggr_info[3] = {
+		.ic_type = DATA_IC_TYPE_MHI_PRIME_V01,
+		.ep_type = DATA_EP_DESC_TYPE_TETH_RMNET_CONS_V01,
+		.bytes_count = 31,
+		.aggr_type = DATA_AGGR_TYPE_QMAPv5_V01,
+	},
+	.aggr_info[4] = {
+		.ic_type = DATA_IC_TYPE_MHI_PRIME_V01,
+		.ep_type = DATA_EP_DESC_TYPE_TETH_RMNET_PROD_V01,
+		.bytes_count = 31,
+		.aggr_type = DATA_AGGR_TYPE_QMAPv5_V01,
+	},
+};
+
+/* QMI A5 service */
+
+static void ipa3_handle_indication_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_indication_reg_req_msg_v01 *indication_req;
+	struct ipa_indication_reg_resp_msg_v01 resp;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+	int rc;
+
+	indication_req = (struct ipa_indication_reg_req_msg_v01 *)decoded_msg;
+	IPAWANDBG("Received INDICATION Request\n");
+
+	/* cache the client sq */
+	memcpy(&ipa3_qmi_ctx->client_sq, sq, sizeof(*sq));
+
+	memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp.resp.result, resp.resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_INDICATION_REGISTER_RESP_V01,
+		QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+		ipa3_indication_reg_resp_msg_data_v01_ei,
+		&resp);
+
+	if (rc < 0) {
+		IPAWANERR("send response for Indication register failed\n");
+		return;
+	}
+
+	ipa3_qmi_indication_fin = true;
+
+	/* check if need sending indication to modem */
+	if (ipa3_qmi_modem_init_fin) {
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_modem_init_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+
+		rc = qmi_send_indication(qmi_handle,
+			&(ipa3_qmi_ctx->client_sq),
+			QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+			QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+			ipa3_master_driver_init_complt_ind_msg_data_v01_ei,
+			&ind);
+
+		if (rc < 0) {
+			IPAWANERR("send indication failed\n");
+			ipa3_qmi_indication_fin = false;
+		}
+	} else {
+		IPAWANERR("indication not sent\n");
+	}
+}
+
+static void ipa3_handle_install_filter_rule_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+	int rc = 0, i;
+
+	rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)decoded_msg;
+	memset(rule_hdl, 0, sizeof(rule_hdl));
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	IPAWANDBG("Received install filter Request\n");
+
+	rc = ipa3_copy_ul_filter_rule_to_ipa(rule_req);
+
+	if (rc) {
+		IPAWANERR("copying UL rules from modem failed\n");
+		return;
+	}
+
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	if (rule_req->filter_spec_ex_list_valid == true) {
+		resp.rule_id_valid = 1;
+		if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) {
+			resp.rule_id_len = MAX_NUM_Q6_RULE;
+			IPAWANERR("installed (%d) max Q6-UL rules ",
+			MAX_NUM_Q6_RULE);
+			IPAWANERR("but modem gives total (%u)\n",
+			rule_req->filter_spec_ex_list_len);
+		} else {
+			resp.rule_id_len =
+				rule_req->filter_spec_ex_list_len;
+		}
+	} else {
+		resp.rule_id_valid = 0;
+		resp.rule_id_len = 0;
+	}
+
+	/* construct UL filter rules response to Modem*/
+	for (i = 0; i < resp.rule_id_len; i++) {
+		resp.rule_id[i] =
+			rule_req->filter_spec_ex_list[i].rule_id;
+	}
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp.resp.result, resp.resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+		ipa3_install_fltr_rule_resp_msg_data_v01_ei,
+		&resp);
+
+	if (rc < 0)
+		IPAWANERR("install filter rules failed\n");
+	else
+		IPAWANDBG("Replied to install filter request\n");
+}
+
+static void ipa3_handle_filter_installed_notify_req(
+	struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	int rc = 0;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	IPAWANDBG("Received filter_install_notify Request\n");
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp.resp.result, resp.resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+		ipa3_fltr_installed_notif_resp_msg_data_v01_ei,
+		&resp);
+
+	if (rc < 0)
+		IPAWANERR("handle filter rules failed\n");
+	else
+		IPAWANDBG("Responded to filter_install_notify Request\n");
+}
+
+static void handle_ipa_config_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_config_resp_msg_v01 resp;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	IPAWANDBG("Received IPA CONFIG Request\n");
+	rc = ipa_mhi_handle_ipa_config_req(
+		(struct ipa_config_req_msg_v01 *)decoded_msg);
+	if (rc) {
+		IPAERR("ipa3_mhi_handle_ipa_config_req failed %d\n", rc);
+		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	}
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp.resp.result, resp.resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_CONFIG_RESP_V01,
+		QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+		ipa3_config_resp_msg_data_v01_ei,
+		&resp);
+
+	if (rc < 0)
+		IPAWANERR("QMI_IPA_CONFIG_RESP_V01 failed\n");
+	else
+		IPAWANDBG("Responded with QMI_IPA_CONFIG_RESP_V01\n");
+}
+
+static void ipa3_handle_modem_init_cmplt_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req;
+	struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp;
+	int rc;
+
+	IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n");
+	cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *)
+		decoded_msg;
+
+	if (!ipa3_modem_init_cmplt)
+		ipa3_modem_init_cmplt = true;
+
+	memset(&resp, 0, sizeof(resp));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp.resp.result, resp.resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01,
+		QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01,
+		ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei,
+		&resp);
+
+
+	if (rc < 0)
+		IPAWANERR("QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 failed\n");
+	else
+		IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n");
+}
+
+static void ipa3_handle_mhi_alloc_channel_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_mhi_alloc_channel_req_msg_v01 *ch_alloc_req;
+	struct ipa_mhi_alloc_channel_resp_msg_v01 *resp = NULL;
+	int rc;
+
+	IPAWANDBG("Received QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01\n");
+	ch_alloc_req = (struct ipa_mhi_alloc_channel_req_msg_v01 *)decoded_msg;
+
+	resp = imp_handle_allocate_channel_req(ch_alloc_req);
+	if (!resp) {
+		IPAWANERR("imp handle allocate channel req failed\n");
+		return;
+	}
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d, arr_valid: %d, arr_len %d\n",
+		resp->resp.result, resp->resp.error, resp->alloc_resp_arr_valid,
+		resp->alloc_resp_arr_len);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01,
+		IPA_MHI_ALLOC_CHANNEL_RESP_MSG_V01_MAX_MSG_LEN,
+		ipa_mhi_alloc_channel_resp_msg_v01_ei,
+		resp);
+
+	if (rc < 0)
+		IPAWANERR("QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01 failed\n");
+	else
+		IPAWANDBG("Sent QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01\n");
+}
+
+static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *decoded_msg)
+{
+	struct ipa_mhi_clk_vote_req_msg_v01 *vote_req;
+	struct ipa_mhi_clk_vote_resp_msg_v01 *resp = NULL, resp2;
+	int rc;
+	uint32_t bw_mbps = 0;
+
+	vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg;
+	IPAWANDBG("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01(%d)\n",
+		vote_req->mhi_vote);
+
+	memset(&resp2, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
+
+	/* for mpm used for ipa clk voting */
+	if (ipa3_is_apq()) {
+		IPAWANDBG("Throughput(%d:%d) clk-rate(%d:%d)\n",
+			vote_req->tput_value_valid,
+			vote_req->tput_value,
+			vote_req->clk_rate_valid,
+			vote_req->clk_rate);
+		if (vote_req->clk_rate_valid) {
+			switch (vote_req->clk_rate) {
+			case QMI_IPA_CLOCK_RATE_LOW_SVS_V01:
+				bw_mbps = 0;
+				break;
+			case QMI_IPA_CLOCK_RATE_SVS_V01:
+				bw_mbps = 350;
+				break;
+			case QMI_IPA_CLOCK_RATE_NOMINAL_V01:
+				bw_mbps = 690;
+				break;
+			case QMI_IPA_CLOCK_RATE_TURBO_V01:
+				bw_mbps = 1200;
+				break;
+			default:
+				IPAWANERR("Unsupported clk_rate (%d)\n",
+				vote_req->clk_rate);
+				bw_mbps = 0;
+				resp2.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+				resp2.resp.error =
+					IPA_QMI_ERR_NOT_SUPPORTED_V01;
+				break;
+			}
+			if (ipa3_vote_for_bus_bw(&bw_mbps)) {
+				IPAWANERR("Failed to vote BW (%u)\n", bw_mbps);
+				resp2.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+				resp2.resp.error =
+					IPA_QMI_ERR_NOT_SUPPORTED_V01;
+			}
+			resp = &resp2;
+		} else {
+			IPAWANERR("clk_rate_valid is false\n");
+			return;
+		}
+	} else {
+		resp = imp_handle_vote_req(vote_req->mhi_vote);
+		if (!resp) {
+			IPAWANERR("imp handle vote req failed\n");
+			return;
+		}
+		IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n");
+	}
+
+	IPAWANDBG("qmi_snd_rsp: result %d, err %d\n",
+		resp->resp.result, resp->resp.error);
+	rc = qmi_send_response(qmi_handle, sq, txn,
+		QMI_IPA_MHI_CLK_VOTE_RESP_V01,
+		IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN,
+		ipa_mhi_clk_vote_resp_msg_v01_ei,
+		resp);
+
+	if (rc < 0)
+		IPAWANERR("QMI_IPA_MHI_CLK_VOTE_RESP_V01 failed\n");
+	else
+		IPAWANDBG("Finished senting QMI_IPA_MHI_CLK_VOTE_RESP_V01\n");
+}
+
+static void ipa3_a5_svc_disconnect_cb(struct qmi_handle *qmi,
+	unsigned int node, unsigned int port)
+{
+	IPAWANDBG_LOW("Received QMI client disconnect\n");
+}
+
+/****************************************************/
+/*                 QMI A5 client -> Q6              */
+/****************************************************/
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive);
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit);
+/* Test client port for IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+
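+/*
+ * Collapse the possible QMI outcomes into a single return code: a
+ * negative errno when the send/wait itself failed, the QMI result
+ * code on a failure response (when SSR handling is enabled), and 0
+ * on success.
+ */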
+static int ipa3_check_qmi_response(int rc,
+				  int req_id,
+				  enum ipa_qmi_result_type_v01 result,
+				  enum ipa_qmi_error_type_v01 error,
+				  char *resp_type)
+{
+	if (rc < 0) {
+		if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+			IPAWANERR(
+			"Timeout for qmi request id %d\n", req_id);
+			return rc;
+		}
+		if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+			IPAWANERR(
+			"SSR while waiting for qmi request id %d\n", req_id);
+			return rc;
+		}
+		IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+			req_id, rc);
+		return rc;
+	}
+	if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+	    ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Request id %d got bad response %d (error %d)\n",
+		req_id, result, error);
+		return result;
+	}
+	IPAWANDBG_LOW("Received %s successfully\n", resp_type);
+	return 0;
+}
+
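+/*
+ * Send a QMI request to the Q6 service and block until the matching
+ * response arrives or timeout_ms expires: init a txn keyed on the
+ * response ei_array, send the request, then wait on the txn. The txn
+ * is canceled if the send itself fails.
+ *
+ * Typical usage (as in the senders below): fill req_desc/resp_desc
+ * with msg_id, max_msg_len and ei_array, then call
+ *
+ *	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req,
+ *		&resp_desc, &resp, QMI_SEND_REQ_TIMEOUT_MS);
+ */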
+static int ipa3_qmi_send_req_wait(struct qmi_handle *client_handle,
+	struct ipa_msg_desc *req_desc, void *req,
+	struct ipa_msg_desc *resp_desc, void *resp,
+	unsigned long timeout_ms)
+{
+	struct qmi_txn txn;
+	int ret;
+
+	ret = qmi_txn_init(client_handle, &txn, resp_desc->ei_array, resp);
+
+	if (ret < 0) {
+		IPAWANERR("QMI txn init failed, ret= %d\n", ret);
+		return ret;
+	}
+
+	ret = qmi_send_request(client_handle,
+		&ipa3_qmi_ctx->server_sq,
+		&txn,
+		req_desc->msg_id,
+		req_desc->max_msg_len,
+		req_desc->ei_array,
+		req);
+
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		return ret;
+	}
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms));
+
+	return ret;
+}
+
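+/*
+ * Build and synchronously send QMI_IPA_INIT_MODEM_DRIVER_REQ_V01.
+ * Every table offset handed to the modem is shifted by the SMEM
+ * restricted-region size, and several optional fields are marked
+ * valid only when the corresponding memory partition is non-empty.
+ */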
+static int ipa3_qmi_init_modem_send_sync_msg(void)
+{
+	struct ipa_init_modem_driver_req_msg_v01 req;
+	struct ipa_init_modem_driver_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+	u16 smem_restr_bytes = ipa3_get_smem_restr_bytes();
+	int wan_cons_ep;
+
+	memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
+	req.platform_type_valid = true;
+	req.platform_type = ipa_wan_platform;
+
+	req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+	req.hdr_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+	req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+		smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+	req.v4_route_tbl_info_valid = true;
+	req.v4_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes;
+	req.v4_route_tbl_info.num_indices =
+		IPA_MEM_PART(v4_modem_rt_index_hi);
+	req.v6_route_tbl_info_valid = true;
+
+	req.v6_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes;
+	req.v6_route_tbl_info.num_indices =
+		IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_filter_tbl_start_addr_valid = true;
+	req.v4_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.v6_filter_tbl_start_addr_valid = true;
+	req.v6_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+	req.modem_mem_info.block_start_addr =
+		IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+	req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+	wan_cons_ep = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	if (wan_cons_ep == IPA_EP_NOT_ALLOCATED) {
+		IPAWANDBG("APPS_WAN_CONS is not valid\n");
+		req.ctrl_comm_dest_end_pt_valid = false;
+		req.ctrl_comm_dest_end_pt = 0;
+	} else {
+		req.ctrl_comm_dest_end_pt_valid = true;
+		req.ctrl_comm_dest_end_pt =
+			ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	}
+
+	req.hdr_proc_ctx_tbl_info_valid =
+		(IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+	req.hdr_proc_ctx_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+	req.hdr_proc_ctx_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+		IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+	req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+	req.zip_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_comp_decomp_ofst) +
+		IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+	/* if hashing is not supported, the modem filter/routing hash
+	 * tables should not be filled with valid data.
+	 */
+	if (!ipa3_ctx_get_flag(IPA_FLTRT_NOT_HASHABLE_EN)) {
+		req.v4_hash_route_tbl_info_valid = true;
+		req.v4_hash_route_tbl_info.route_tbl_start_addr =
+			IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes;
+		req.v4_hash_route_tbl_info.num_indices =
+			IPA_MEM_PART(v4_modem_rt_index_hi);
+
+		req.v6_hash_route_tbl_info_valid = true;
+		req.v6_hash_route_tbl_info.route_tbl_start_addr =
+			IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes;
+		req.v6_hash_route_tbl_info.num_indices =
+			IPA_MEM_PART(v6_modem_rt_index_hi);
+
+		req.v4_hash_filter_tbl_start_addr_valid = true;
+		req.v4_hash_filter_tbl_start_addr =
+			IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes;
+
+		req.v6_hash_filter_tbl_start_addr_valid = true;
+		req.v6_hash_filter_tbl_start_addr =
+			IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes;
+	}
+	req.hw_stats_quota_base_addr_valid = true;
+	req.hw_stats_quota_base_addr =
+		IPA_MEM_PART(stats_quota_ofst) + smem_restr_bytes;
+
+	req.hw_stats_quota_size_valid = true;
+	req.hw_stats_quota_size = IPA_MEM_PART(stats_quota_size);
+
+	req.hw_drop_stats_base_addr_valid = true;
+	req.hw_drop_stats_base_addr =
+		IPA_MEM_PART(stats_drop_ofst) + smem_restr_bytes;
+
+	req.hw_drop_stats_table_size_valid = true;
+	req.hw_drop_stats_table_size = IPA_MEM_PART(stats_drop_size);
+
+	if (!ipa3_uc_loaded_check()) {  /* First time boot */
+		req.is_ssr_bootup_valid = false;
+		req.is_ssr_bootup = 0;
+	} else {  /* After SSR boot */
+		req.is_ssr_bootup_valid = true;
+		req.is_ssr_bootup = 1;
+	}
+
+	IPAWANDBG("platform_type %d\n", req.platform_type);
+	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+			req.hdr_tbl_info.modem_offset_start);
+	IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+			req.hdr_tbl_info.modem_offset_end);
+	IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v4_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+			req.v4_route_tbl_info.num_indices);
+	IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v6_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+			req.v6_route_tbl_info.num_indices);
+	IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+			req.v4_filter_tbl_start_addr);
+	IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+			req.v6_filter_tbl_start_addr);
+	IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+			req.modem_mem_info.block_start_addr);
+	IPAWANDBG("modem_mem_info.size %d\n",
+			req.modem_mem_info.size);
+	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+			req.ctrl_comm_dest_end_pt);
+	IPAWANDBG("is_ssr_bootup %d\n",
+			req.is_ssr_bootup);
+	IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v4_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n",
+		req.v4_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v6_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n",
+		req.v6_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n",
+		req.v4_hash_filter_tbl_start_addr);
+	IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n",
+		req.v6_hash_filter_tbl_start_addr);
+
+	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+	req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+	resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
+
+	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, &req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_INIT_MODEM_DRIVER_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
+/* sending filter-install-request to modem */
+int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+	int i;
+
+	/* check if modem up */
+	if (!ipa3_qmi_indication_fin ||
+		!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI haven't up yet\n");
+		return -EINVAL;
+	}
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_list_len == 0) {
+		IPAWANDBG("IPACM passed zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM passed %u rules to Q6\n",
+			req->filter_spec_list_len);
+	}
+
+	if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANDBG(
+		"Number of filtering rules from IPACM exceeds limit\n");
+		return -EINVAL;
+	} else if (req->source_pipe_index_valid != 0) {
+		IPAWANDBG(
+		"IPACM passed non-zero source_pipe_index_valid (%d)\n",
+			req->source_pipe_index_valid);
+		return -EINVAL;
+	} else if (req->source_pipe_index >= ipa3_ctx_get_num_pipes()) {
+		IPAWANDBG(
+		"IPACM passed invalid source pipe index (%d)\n",
+		req->source_pipe_index);
+		return -EINVAL;
+	}
+	for (i = 0; i < req->filter_spec_list_len; i++) {
+		if ((req->filter_spec_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V4_V01) &&
+			(req->filter_spec_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V6_V01))
+			return -EINVAL;
+		if (req->filter_spec_list[i].is_mux_id_valid == false)
+			return -EINVAL;
+		if (req->filter_spec_list[i].is_routing_table_index_valid
+			== false)
+			return -EINVAL;
+		if ((req->filter_spec_list[i].filter_action <=
+			QMI_IPA_FILTER_ACTION_INVALID_V01) ||
+			(req->filter_spec_list[i].filter_action >
+			QMI_IPA_FILTER_ACTION_EXCEPTION_V01))
+			return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+	req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+	resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_install_filter");
+}
+
+/* sending extended filter-install-request to modem */
+int ipa3_qmi_filter_request_ex_send(
+	struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
+{
+	struct ipa_install_fltr_rule_resp_ex_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+	int i;
+
+	/* check if modem up */
+	if (!ipa3_qmi_indication_fin ||
+		!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI haven't up yet\n");
+		return -EINVAL;
+	}
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_ex_list_len == 0) {
+		IPAWANDBG("IPACM passed zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM passed %u rules to Q6\n",
+			req->filter_spec_ex_list_len);
+	}
+
+	if (req->filter_spec_ex_list_len >= QMI_IPA_MAX_FILTERS_EX_V01) {
+		IPAWANDBG(
+		"Number of filtering rules from IPACM exceeds limit\n");
+		return -EINVAL;
+	} else if (req->source_pipe_index_valid != 0) {
+		IPAWANDBG(
+		"IPACM passed non-zero source_pipe_index_valid (%d)\n",
+			req->source_pipe_index_valid);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < req->filter_spec_ex_list_len; i++) {
+		if ((req->filter_spec_ex_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V4_V01) &&
+			(req->filter_spec_ex_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V6_V01))
+			return -EINVAL;
+		if (req->filter_spec_ex_list[i].is_mux_id_valid == false)
+			return -EINVAL;
+		if (req->filter_spec_ex_list[i].is_routing_table_index_valid
+			== false)
+			return -EINVAL;
+		if ((req->filter_spec_ex_list[i].filter_action <=
+			QMI_IPA_FILTER_ACTION_INVALID_V01) ||
+			(req->filter_spec_ex_list[i].filter_action >
+			QMI_IPA_FILTER_ACTION_EXCEPTION_V01))
+			return -EINVAL;
+	}
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
+			ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01;
+	req_desc.ei_array = ipa3_install_fltr_rule_req_ex_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_ex_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01;
+	resp_desc.ei_array = ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei;
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_install_filter");
+}
+
+/* sending add offload-connection-request to modem */
+int ipa3_qmi_add_offload_request_send(
+	struct ipa_add_offload_connection_req_msg_v01 *req)
+{
+	struct ipa_add_offload_connection_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+	int i, j;
+	uint32_t id;
+
+	/* check if modem up */
+	if (!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI haven't up yet\n");
+		return -EINVAL;
+	}
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->filter_spec_ex2_list_len == 0) {
+		IPAWANDBG("IPACM passed zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM passed %u rules to Q6\n",
+			req->filter_spec_ex2_list_len);
+	}
+
+	/* currently set total max to 64 */
+	if (req->filter_spec_ex2_list_len +
+		ipa3_qmi_ctx->num_ipa_offload_connection
+		>= QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANDBG(
+		"cur(%d), req(%d), exceed limit (%d)\n",
+			ipa3_qmi_ctx->num_ipa_offload_connection,
+			req->filter_spec_ex2_list_len,
+			QMI_IPA_MAX_FILTERS_V01);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < req->filter_spec_ex2_list_len; i++) {
+		if ((req->filter_spec_ex2_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V4_V01) &&
+			(req->filter_spec_ex2_list[i].ip_type !=
+			QMI_IPA_IP_TYPE_V6_V01))
+			return -EINVAL;
+		if (req->filter_spec_ex2_list[i].is_mux_id_valid == false)
+			return -EINVAL;
+		if ((req->filter_spec_ex2_list[i].filter_action <=
+			QMI_IPA_FILTER_ACTION_INVALID_V01) ||
+			(req->filter_spec_ex2_list[i].filter_action >
+			QMI_IPA_FILTER_ACTION_EXCEPTION_V01))
+			return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+		IPA_ADD_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01;
+	req_desc.ei_array = ipa_add_offload_connection_req_msg_v01_ei;
+
+	memset(&resp, 0, sizeof(struct
+		ipa_add_offload_connection_resp_msg_v01));
+	resp_desc.max_msg_len =
+		IPA_ADD_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_IPA_ADD_OFFLOAD_CONNECTION_RESP_V01;
+	resp_desc.ei_array = ipa_add_offload_connection_resp_msg_v01_ei;
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	rc = ipa3_check_qmi_response(rc,
+		QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_add_offload_connection");
+
+	if (rc) {
+		IPAWANERR("QMI get Response %d failed, rc= %d\n",
+			QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	/* Check & copy rule-handle */
+	if (!resp.filter_handle_list_valid) {
+		IPAWANERR("QMI resp invalid %d failed\n",
+			resp.filter_handle_list_valid);
+		return -ERANGE;
+	}
+
+	if (resp.filter_handle_list_len !=
+		req->filter_spec_ex2_list_len) {
+		IPAWANERR("QMI resp invalid size %d req %d\n",
+			resp.filter_handle_list_len,
+			req->filter_spec_ex2_list_len);
+		return -ERANGE;
+	}
+
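+	/*
+	 * Cache each returned filter handle, keyed by its rule-id, so
+	 * the handle can be looked up again when IPACM later removes
+	 * the offload connection.
+	 */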
+	mutex_lock(&ipa3_qmi_lock);
+	for (i = 0; i < req->filter_spec_ex2_list_len; i++) {
+		id = resp.filter_handle_list[i].filter_spec_identifier;
+		/* check whether the rule-id matches */
+		if (req->filter_spec_ex2_list[i].rule_id != id) {
+			IPAWANERR("QMI entry %d id %d mismatches rule-id %d\n",
+				i,
+				id,
+				req->filter_spec_ex2_list[i].rule_id);
+			mutex_unlock(&ipa3_qmi_lock);
+			return -EINVAL;
+		}
+		/* find free spot*/
+		for (j = 0; j < QMI_IPA_MAX_FILTERS_V01; j++) {
+			if (!ipa3_qmi_ctx->ipa_offload_cache[j].valid)
+				break;
+		}
+
+		if (j == QMI_IPA_MAX_FILTERS_V01) {
+			IPAWANERR("can't find free spot for rule-id %d\n",
+				id);
+			mutex_unlock(&ipa3_qmi_lock);
+			return -EINVAL;
+		}
+
+		/* save rule-id handle to cache */
+		ipa3_qmi_ctx->ipa_offload_cache[j].rule_id =
+			resp.filter_handle_list[i].filter_spec_identifier;
+		ipa3_qmi_ctx->ipa_offload_cache[j].rule_hdl =
+			resp.filter_handle_list[i].filter_handle;
+		ipa3_qmi_ctx->ipa_offload_cache[j].valid = true;
+		ipa3_qmi_ctx->ipa_offload_cache[j].ip_type =
+			req->filter_spec_ex2_list[i].ip_type;
+		ipa3_qmi_ctx->num_ipa_offload_connection++;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+	IPAWANDBG("Update cached conntrack entries (%d)\n",
+		ipa3_qmi_ctx->num_ipa_offload_connection);
+	return rc;
+}
+
+/* sending remove offload-connection-request to modem */
+int ipa3_qmi_rmv_offload_request_send(
+	struct ipa_remove_offload_connection_req_msg_v01 *req)
+{
+	struct ipa_remove_offload_connection_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+	int i, j;
+	uint32_t id;
+
+	/* check if modem up */
+	if (!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI haven't up yet\n");
+		return -EINVAL;
+	}
+
+	/* check if the # of handles from IPACM is valid */
+	if (!req->clean_all_rules_valid && req->filter_handle_list_len == 0) {
+		IPAWANDBG("IPACM deleted zero rules !\n");
+		return -EINVAL;
+	}
+
+	IPAWANDBG("IPACM pass (%d) rules handles to Q6, cur (%d)\n",
+	req->filter_handle_list_len,
+	ipa3_qmi_ctx->num_ipa_offload_connection);
+
+	/* at most num_ipa_offload_connection handles may be removed */
+	if (req->filter_handle_list_len >
+		ipa3_qmi_ctx->num_ipa_offload_connection) {
+		IPAWANDBG(
+		"cur(%d), req_rmv(%d)\n",
+			ipa3_qmi_ctx->num_ipa_offload_connection,
+			req->filter_handle_list_len);
+		return -EINVAL;
+	}
+
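+	/*
+	 * Resolve each rule-id back to the filter handle cached at
+	 * add-offload time, fill it into the request, and invalidate
+	 * the cache entry.
+	 */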
+	mutex_lock(&ipa3_qmi_lock);
+	for (i = 0; i < req->filter_handle_list_len; i++) {
+		/* check if rule-id match */
+		id =
+			req->filter_handle_list[i].filter_spec_identifier;
+		for (j = 0; j < QMI_IPA_MAX_FILTERS_V01; j++) {
+			if ((ipa3_qmi_ctx->ipa_offload_cache[j].valid) &&
+				(ipa3_qmi_ctx->ipa_offload_cache[j].rule_id ==
+				id))
+				break;
+		}
+		if (j == QMI_IPA_MAX_FILTERS_V01) {
+			IPAWANERR("can't find rule-id %d\n",
+				id);
+			mutex_unlock(&ipa3_qmi_lock);
+			return -EINVAL;
+		}
+
+		/* fill up the filter_handle */
+		req->filter_handle_list[i].filter_handle =
+			ipa3_qmi_ctx->ipa_offload_cache[j].rule_hdl;
+		ipa3_qmi_ctx->ipa_offload_cache[j].valid = false;
+		ipa3_qmi_ctx->num_ipa_offload_connection--;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len =
+		IPA_REMOVE_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01;
+	req_desc.ei_array = ipa_remove_offload_connection_req_msg_v01_ei;
+
+	/* clean the DL rules in the cache if the flag is set */
+	if (req->clean_all_rules) {
+		for (i = 0; i < QMI_IPA_MAX_FILTERS_V01; i++)
+			if (ipa3_qmi_ctx->ipa_offload_cache[i].valid)
+				ipa3_qmi_ctx->ipa_offload_cache[i].valid =
+				false;
+	}
+
+	memset(&resp, 0, sizeof(struct
+		ipa_remove_offload_connection_resp_msg_v01));
+	resp_desc.max_msg_len =
+		IPA_REMOVE_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_IPA_REMOVE_OFFLOAD_CONNECTION_RESP_V01;
+	resp_desc.ei_array = ipa_remove_offload_connection_resp_msg_v01_ei;
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01,
+			rc);
+		return rc;
+	}
+	IPAWANDBG("left cached conntrack entries (%d)\n",
+		ipa3_qmi_ctx->num_ipa_offload_connection);
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_rmv_offload_connection");
+}
+
+/* sending ul-filter-install-request to modem */
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc, i;
+
+	IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->firewall_rules_list_len);
+
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(
+		&(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+		req,
+		sizeof(struct
+		ipa_configure_ul_firewall_rules_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	/* check if modem is up */
+	if (!ipa3_qmi_indication_fin ||
+		!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI service is not up yet\n");
+		return -EINVAL;
+	}
+
+	/* Passing 0 rules means that firewall is disabled */
+	if (req->firewall_rules_list_len == 0)
+		IPAWANDBG("IPACM passed 0 rules to Q6\n");
+
+	if (req->firewall_rules_list_len >= QMI_IPA_MAX_UL_FIREWALL_RULES_V01) {
+		IPAWANERR(
+		"Number of rules passed by IPACM, %d, exceed limit %d\n",
+			req->firewall_rules_list_len,
+			QMI_IPA_MAX_UL_FIREWALL_RULES_V01);
+		return -EINVAL;
+	}
+
+	/* Check for valid IP type */
+	for (i = 0; i < req->firewall_rules_list_len; i++) {
+		if (req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V4_V01 &&
+			req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V6_V01) {
+			IPAWANERR("Invalid IP type %d\n",
+					req->firewall_rules_list[i].ip_type);
+			return -EINVAL;
+		}
+	}
+
+	req_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+	req_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+	memset(&resp, 0,
+		sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01,
+		resp.resp.result,
+		resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	if (!req || !req->source_pipe_bitmask) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx_get_type(IPA3_HW_MODE) == IPA_HW_MODE_VIRTUAL ||
+		ipa3_ctx_get_type(IPA3_HW_MODE) == IPA_HW_MODE_EMULATION) {
+		IPAWANDBG("Simulating success on emu/virt mode\n");
+		return 0;
+	}
+
+	req_desc.max_msg_len =
+	QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0,
+		sizeof(struct ipa_enable_force_clear_datapath_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01,
+		resp.resp.result,
+		resp.resp.error, "ipa_enable_force_clear_datapath");
+}
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	if (!req) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx_get_type(IPA3_HW_MODE) == IPA_HW_MODE_VIRTUAL ||
+		ipa3_ctx_get_type(IPA3_HW_MODE) == IPA_HW_MODE_EMULATION) {
+		IPAWANDBG("Simulating success on emu/virt mode\n");
+		return 0;
+	}
+
+	req_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0,
+		sizeof(struct ipa_disable_force_clear_datapath_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01,
+		resp.resp.result,
+		resp.resp.error, "ipa_disable_force_clear_datapath");
+}
+
+/* sending filter-installed-notify-request to modem */
+int ipa3_qmi_filter_notify_send(
+		struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	/* check if the filter rules from IPACM are valid */
+	if (req->rule_id_len == 0) {
+		IPAWANDBG(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+	} else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->rule_id_len);
+		return -EINVAL;
+	}
+
+	if (req->rule_id_ex_len == 0) {
+		IPAWANDBG(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+	} else if (req->rule_id_ex_len > QMI_IPA_MAX_FILTERS_EX2_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->rule_id_ex_len);
+		return -EINVAL;
+	}
+
+	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
+			req->source_pipe_index, req->install_status);
+		return -EINVAL;
+	} else if ((req->rule_id_valid != 1) &&
+		(req->rule_id_ex_valid != 1)) {
+		IPAWANERR(" UL filter rule for pipe %d rule_id_valid = %d/%d\n",
+			req->source_pipe_index, req->rule_id_valid,
+			req->rule_id_ex_valid);
+		return -EINVAL;
+	} else if (req->source_pipe_index >= ipa3_ctx_get_num_pipes()) {
+		IPAWANDBG(
+		"IPACM passed invalid source pipe index (%d)\n",
+		req->source_pipe_index);
+		return -EINVAL;
+	} else if (((req->embedded_pipe_index_valid != true) ||
+			(req->embedded_call_mux_id_valid != true)) &&
+			((req->embedded_pipe_index_valid != false) ||
+			(req->embedded_call_mux_id_valid != false))) {
+		IPAWANERR(
+			"IPACM passed inconsistent embedded pipe/mux validity\n");
+		return -EINVAL;
+	} else if (req->embedded_pipe_index >= ipa3_ctx_get_num_pipes()) {
+		IPAWANERR("IPACM passed invalid embedded pipe index (%d)\n",
+			req->embedded_pipe_index);
+		return -EINVAL;
+	}
+
+	if (req->source_pipe_index == -1) {
+		IPAWANERR("Source pipe index invalid\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+			ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len =
+	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+	req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+	resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
+static void ipa3_q6_clnt_quota_reached_ind_cb(struct qmi_handle *handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *data)
+{
+	struct ipa_data_usage_quota_reached_ind_msg_v01 *qmi_ind;
+
+	if (handle != ipa_q6_clnt) {
+		IPAWANERR("Wrong client\n");
+		return;
+	}
+
+	qmi_ind = (struct ipa_data_usage_quota_reached_ind_msg_v01 *) data;
+
+	IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+		qmi_ind->apn.mux_id, (unsigned long) qmi_ind->apn.num_Mbytes);
+	ipa3_broadcast_quota_reach_ind(qmi_ind->apn.mux_id,
+		IPA_UPSTEAM_MODEM);
+}
+
+static void ipa3_q6_clnt_install_firewall_rules_ind_cb(
+	struct qmi_handle *handle,
+	struct sockaddr_qrtr *sq,
+	struct qmi_txn *txn,
+	const void *data)
+{
+	struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
+
+	memset(&qmi_ul_firewall_ind, 0, sizeof(
+		struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+	memcpy(&qmi_ul_firewall_ind, data, sizeof(
+		struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+
+	IPAWANDBG("UL firewall rules install indication on Q6");
+	if (qmi_ul_firewall_ind.result.is_success ==
+		QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+		IPAWANDBG(" : Success\n");
+		IPAWANDBG
+		("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id);
+	} else if (qmi_ul_firewall_ind.result.is_success ==
+		QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01) {
+		IPAWANERR(": Failure\n");
+	} else {
+		IPAWANERR(": Unexpected Result");
+	}
+}
+
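+/*
+ * Worker scheduled when the Q6 QMI service appears: connect the
+ * client socket to the server address, then (unless this is APQ,
+ * where only the client side runs) send the synchronous modem-init
+ * message and, once init completes, send the master-driver-init-
+ * complete indication back to the modem if it already registered
+ * for it.
+ */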
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+	rc = kernel_connect(ipa_q6_clnt->sock,
+		(struct sockaddr *) &ipa3_qmi_ctx->server_sq,
+		sizeof(ipa3_qmi_ctx->server_sq),
+		0);
+
+	if (rc < 0) {
+		IPAWANERR("Couldnt connect Server\n");
+		return;
+	}
+
+	if (!send_qmi_init_q6)
+		return;
+
+	IPAWANDBG("Q6 QMI service available now\n");
+	if (ipa3_is_apq()) {
+		ipa3_qmi_modem_init_fin = true;
+		IPAWANDBG("QMI-client complete, ipa3_qmi_modem_init_fin : %d\n",
+			ipa3_qmi_modem_init_fin);
+		return;
+	}
+
+	/* Initialize modem IPA-driver */
+	IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n");
+	rc = ipa3_qmi_init_modem_send_sync_msg();
+	if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+		IPAWANERR(
+		"ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n");
+		/* Cleanup when ipa3_wwan_remove is called */
+		vfree(ipa_q6_clnt);
+		ipa_q6_clnt = NULL;
+		return;
+	}
+
+	if (rc != 0) {
+		IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n");
+		/*
+		 * Hardware not responding.
+		 * This is a very unexpected scenario
+		 * which requires a kernel panic in
+		 * order to force dumps for QMI/Q6 side analysis.
+		 */
+		BUG();
+	}
+	ipa3_qmi_modem_init_fin = true;
+
+	/* In cold-bootup, first_time_handshake = false */
+	ipa3_q6_handshake_complete(first_time_handshake);
+	first_time_handshake = true;
+	IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n",
+		ipa3_qmi_modem_init_fin);
+
+	if (ipa3_qmi_indication_fin) {
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_indication_fin);
+		memset(&ind, 0, sizeof(struct
+			ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+
+		if (unlikely(!ipa3_svc_handle)) {
+			IPAWANERR("Invalid svc handle.Ignore sending ind.");
+			return;
+		}
+
+		rc = qmi_send_indication(ipa3_svc_handle,
+			&ipa3_qmi_ctx->client_sq,
+			QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+			QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+			ipa3_master_driver_init_complt_ind_msg_data_v01_ei,
+			&ind);
+
+		if (rc < 0)
+			IPAWANERR("send indication failed, rc = %d\n", rc);
+		else
+			IPAWANDBG("ipa_qmi_service_client good\n");
+	} else {
+		IPAWANERR("not send indication (%d)\n",
+		ipa3_qmi_indication_fin);
+	}
+
+	send_qmi_init_q6 = false;
+}
+
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work)
+{
+	if (ipa3_qmi_ctx != NULL) {
+		ipa3_qmi_ctx->server_sq.sq_family = 0;
+		ipa3_qmi_ctx->server_sq.sq_node = 0;
+		ipa3_qmi_ctx->server_sq.sq_port = 0;
+	}
+}
+
+static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi,
+	struct qmi_service *service)
+{
+	IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n",
+		  service->service, service->version, service->instance,
+		  service->node, service->port);
+
+	if (ipa3_qmi_ctx != NULL) {
+		ipa3_qmi_ctx->server_sq.sq_family = AF_QIPCRTR;
+		ipa3_qmi_ctx->server_sq.sq_node = service->node;
+		ipa3_qmi_ctx->server_sq.sq_port = service->port;
+	}
+	if (!workqueues_stopped) {
+		queue_delayed_work(ipa_clnt_req_workqueue,
+			&ipa3_work_svc_arrive, 0);
+	}
+	return 0;
+}
+
+static void ipa3_q6_clnt_svc_event_notify_net_reset(struct qmi_handle *qmi)
+{
+	if (!workqueues_stopped)
+		queue_delayed_work(ipa_clnt_req_workqueue,
+			&ipa3_work_svc_exit, 0);
+}
+
+static void ipa3_q6_clnt_svc_event_notify_svc_exit(struct qmi_handle *qmi,
+						   struct qmi_service *svc)
+{
+	IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", svc->service,
+		  svc->version, svc->instance, svc->node, svc->port);
+
+	if (!workqueues_stopped)
+		queue_delayed_work(ipa_clnt_req_workqueue,
+			&ipa3_work_svc_exit, 0);
+}
+
+static struct qmi_ops server_ops = {
+	.del_client = ipa3_a5_svc_disconnect_cb,
+};
+
+static struct qmi_ops client_ops = {
+	.new_server = ipa3_q6_clnt_svc_event_notify_svc_new,
+	.del_server = ipa3_q6_clnt_svc_event_notify_svc_exit,
+	.net_reset = ipa3_q6_clnt_svc_event_notify_net_reset,
+};
+
+static struct qmi_msg_handler server_handlers[] = {
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+		.ei = ipa3_indication_reg_req_msg_data_v01_ei,
+		.decoded_size = sizeof(struct ipa_indication_reg_req_msg_v01),
+		.fn = ipa3_handle_indication_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+		.ei = ipa3_install_fltr_rule_req_msg_data_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_install_fltr_rule_req_msg_v01),
+		.fn = ipa3_handle_install_filter_rule_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+		.ei = ipa3_fltr_installed_notif_req_msg_data_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_fltr_installed_notif_req_msg_v01),
+		.fn = ipa3_handle_filter_installed_notify_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_CONFIG_REQ_V01,
+		.ei = ipa3_config_req_msg_data_v01_ei,
+		.decoded_size = sizeof(struct ipa_config_req_msg_v01),
+		.fn = handle_ipa_config_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
+		.ei = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01),
+		.fn = ipa3_handle_modem_init_cmplt_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01,
+		.ei = ipa_mhi_alloc_channel_req_msg_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_mhi_alloc_channel_req_msg_v01),
+		.fn = ipa3_handle_mhi_alloc_channel_req,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = QMI_IPA_MHI_CLK_VOTE_REQ_V01,
+		.ei = ipa_mhi_clk_vote_req_msg_v01_ei,
+		.decoded_size = sizeof(struct ipa_mhi_clk_vote_req_msg_v01),
+		.fn = ipa3_handle_mhi_vote_req,
+	},
+};
+
+/* client_handlers are client callbacks that will be called from QMI context
+ * when an indication from the Q6 server arrives.
+ * In our case, client_handlers only needs to handle QMI_INDICATION,
+ * since QMI_REQUEST/QMI_RESPONSE are handled in a blocking fashion
+ * at the time the QMI_REQUESTs are sent.
+ */
+static struct qmi_msg_handler client_handlers[] = {
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01,
+		.ei = ipa3_data_usage_quota_reached_ind_msg_data_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01),
+		.fn = ipa3_q6_clnt_quota_reached_ind_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01,
+		.ei = ipa3_install_fltr_rule_req_msg_data_v01_ei,
+		.decoded_size = sizeof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01),
+		.fn = ipa3_q6_clnt_install_firewall_rules_ind_cb,
+	},
+};
+
+
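+/*
+ * One-shot init worker: allocate the QMI message cache, register the
+ * local A5 QMI server (skipped on APQ) and start a lookup for the Q6
+ * service; the handshake then continues in ipa3_q6_clnt_svc_arrive()
+ * once the service is found.
+ */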
+static void ipa3_qmi_service_init_worker(struct work_struct *work)
+{
+	int rc;
+
+	/* start the QMI msg cache */
+	ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx));
+	if (!ipa3_qmi_ctx) {
+		IPAWANERR("Failed to allocate ipa3_qmi_ctx\n");
+		return;
+	}
+
+	if (ipa3_is_apq()) {
+		/* Only start QMI-client */
+		IPAWANDBG("Only start IPA A7 QMI client\n");
+		goto qmi_client_start;
+	}
+
+	/* Initialize QMI-service */
+	IPAWANDBG("Initializing IPA A7 QMI service\n");
+
+	ipa3_qmi_ctx->modem_cfg_emb_pipe_flt =
+		ipa_get_modem_cfg_emb_pipe_flt();
+
+	ipa3_qmi_ctx->num_ipa_offload_connection = 0;
+	ipa3_svc_handle = vzalloc(sizeof(*ipa3_svc_handle));
+
+	if (!ipa3_svc_handle)
+		goto destroy_qmi_handle;
+
+	rc = qmi_handle_init(ipa3_svc_handle,
+		QMI_IPA_MAX_MSG_LEN,
+		&server_ops,
+		server_handlers);
+
+	if (rc < 0) {
+		IPAWANERR("Initializing ipa_a5 svc failed %d\n", rc);
+		goto destroy_qmi_handle;
+	}
+
+	rc = qmi_add_server(ipa3_svc_handle,
+		IPA_A5_SERVICE_SVC_ID,
+		IPA_A5_SVC_VERS,
+		IPA_A5_SERVICE_INS_ID);
+
+	if (rc < 0) {
+		IPAWANERR("Registering ipa_a5 svc failed %d\n",
+				rc);
+		goto deregister_qmi_srv;
+	}
+
+qmi_client_start:
+	/* Initialize QMI-client */
+	ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+	if (!ipa_clnt_req_workqueue) {
+		IPAWANERR("Creating clnt_req workqueue failed\n");
+		goto deregister_qmi_srv;
+	}
+
+	/* Create a Local client port for QMI communication */
+	ipa_q6_clnt = vzalloc(sizeof(*ipa_q6_clnt));
+
+	if (!ipa_q6_clnt)
+		goto destroy_clnt_req_wq;
+
+	rc = qmi_handle_init(ipa_q6_clnt,
+		QMI_IPA_MAX_MSG_LEN,
+		&client_ops,
+		client_handlers);
+
+	if (rc < 0) {
+		IPAWANERR("Creating clnt handle failed\n");
+		goto destroy_qmi_client_handle;
+	}
+
+	rc = qmi_add_lookup(ipa_q6_clnt,
+		IPA_Q6_SERVICE_SVC_ID,
+		IPA_Q6_SVC_VERS,
+		IPA_Q6_SERVICE_INS_ID);
+
+	if (rc < 0) {
+		IPAWANERR("Adding Q6 Svc failed\n");
+		goto deregister_qmi_client;
+	}
+
+	/* wait for the Q6 service; modem init is sent once it arrives */
+	IPAWANDBG("waiting for Q6 service to become available\n");
+	return;
+
+deregister_qmi_client:
+	qmi_handle_release(ipa_q6_clnt);
+destroy_qmi_client_handle:
+	vfree(ipa_q6_clnt);
+	ipa_q6_clnt = NULL;
+destroy_clnt_req_wq:
+	destroy_workqueue(ipa_clnt_req_workqueue);
+	ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+	if (!ipa3_is_apq())
+		qmi_handle_release(ipa3_svc_handle);
+destroy_qmi_handle:
+	vfree(ipa3_qmi_ctx);
+destroy_ipa_A7_svc_wq:
+	if (!ipa3_is_apq()) {
+		vfree(ipa3_svc_handle);
+		ipa3_svc_handle = NULL;
+	}
+	ipa3_qmi_ctx = NULL;
+}
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	ipa_wan_platform = wan_platform_type;
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+	send_qmi_init_q6 = true;
+	workqueues_stopped = false;
+
+	if (!ipa3_svc_handle) {
+		INIT_WORK(&ipa3_qmi_service_init_work,
+			ipa3_qmi_service_init_worker);
+		schedule_work(&ipa3_qmi_service_init_work);
+	}
+	return 0;
+}
+
+void ipa3_qmi_service_exit(void)
+{
+
+	workqueues_stopped = true;
+
+	/* qmi-service */
+	if (ipa3_svc_handle != NULL) {
+		qmi_handle_release(ipa3_svc_handle);
+		vfree(ipa3_svc_handle);
+		ipa3_svc_handle = NULL;
+	}
+
+	/* qmi-client */
+
+	/* Release client handle */
+	if (ipa_q6_clnt != NULL) {
+		qmi_handle_release(ipa_q6_clnt);
+		vfree(ipa_q6_clnt);
+		ipa_q6_clnt = NULL;
+		if (ipa_clnt_req_workqueue) {
+			destroy_workqueue(ipa_clnt_req_workqueue);
+			ipa_clnt_req_workqueue = NULL;
+		}
+	}
+
+	/* clean the QMI msg cache */
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		vfree(ipa3_qmi_ctx);
+		ipa3_qmi_ctx = NULL;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+	send_qmi_init_q6 = true;
+}
+
+void ipa3_qmi_stop_workqueues(void)
+{
+	IPAWANDBG("Stopping all QMI workqueues\n");
+
+	/* Stopping all workqueues so new work won't be scheduled */
+	workqueues_stopped = true;
+
+	/* Making sure that the current scheduled work won't be executed */
+	cancel_delayed_work(&ipa3_work_svc_arrive);
+	cancel_delayed_work(&ipa3_work_svc_exit);
+}
+
+/* voting for bus BW on behalf of the modem */
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	int ret;
+
+	if (bw_mbps == NULL) {
+		IPAWANERR("Bus BW is invalid\n");
+		return -EINVAL;
+	}
+
+	IPAWANDBG("Bus BW is %u\n", *bw_mbps);
+
+	ret = ipa3_wwan_set_modem_perf_profile(*bw_mbps);
+	if (ret)
+		IPAWANERR("Failed to set perf profile to BW %u\n",
+			*bw_mbps);
+	else
+		IPAWANDBG("Succeeded to set perf profile to BW %u\n",
+			*bw_mbps);
+
+	return ret;
+}
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+			   struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_GET_DATA_STATS_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+			      struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_GET_APN_DATA_STATS_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01");
+}
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01");
+}
+
+int ipa3_qmi_set_aggr_info(enum ipa_aggr_enum_type_v01 aggr_enum_type)
+{
+	struct ipa_mhi_prime_aggr_info_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	IPAWANDBG("sending aggr_info_request\n");
+
+	/* set the requested QMAP aggregation format */
+	aggr_req.aggr_info[1].aggr_type = aggr_enum_type;
+	aggr_req.aggr_info[2].aggr_type = aggr_enum_type;
+	aggr_req.aggr_info[3].aggr_type = aggr_enum_type;
+	aggr_req.aggr_info[4].aggr_type = aggr_enum_type;
+
+	memset(&resp, 0, sizeof(struct ipa_mhi_prime_aggr_info_resp_msg_v01));
+
+	req_desc.max_msg_len = IPA_MHI_PRIME_AGGR_INFO_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01;
+	req_desc.ei_array = ipa_mhi_prime_aggr_info_req_msg_v01_ei;
+
+	resp_desc.max_msg_len =
+		IPA_MHI_PRIME_AGGR_INFO_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_IPA_MHI_PRIME_AGGR_INFO_RESP_V01;
+	resp_desc.ei_array = ipa_mhi_prime_aggr_info_resp_msg_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01(%d)\n",
+		aggr_enum_type);
+	if (unlikely(!ipa_q6_clnt)) {
+		IPAWANERR(" ipa_q6_clnt not initialized\n");
+		return -ETIMEDOUT;
+	}
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, &aggr_req,
+		&resp_desc, &resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_MHI_PRIME_AGGR_INFO_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_mhi_prime_aggr_info_req_msg_v01");
+}
+
+int ipa3_qmi_stop_data_qouta(void)
+{
+	struct ipa_stop_data_usage_quota_req_msg_v01 req;
+	struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, &req,
+		&resp_desc, &resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("QMI send Req %d failed, rc= %d\n",
+			QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
+}
+
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	req_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	struct ipa_msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+	req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+	resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, resp,
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	if (rc < 0) {
+		IPAWANERR("send Req %d failed, rc= %d\n",
+			QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01,
+			rc);
+		return rc;
+	}
+
+	IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+		resp->resp.error,
+		"struct ipa_get_stats_per_client_req_msg_v01");
+}
+
+int ipa3_qmi_send_mhi_ready_indication(
+	struct ipa_mhi_ready_indication_msg_v01 *req)
+{
+	IPAWANDBG("Sending QMI_IPA_MHI_READY_IND_V01\n");
+
+	if (unlikely(!ipa3_svc_handle))
+		return -ETIMEDOUT;
+
+	return qmi_send_indication(ipa3_svc_handle,
+		&ipa3_qmi_ctx->client_sq,
+		QMI_IPA_MHI_READY_IND_V01,
+		IPA_MHI_READY_INDICATION_MSG_V01_MAX_MSG_LEN,
+		ipa_mhi_ready_indication_msg_v01_ei,
+		req);
+}
+
+int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req)
+{
+
+	struct ipa_msg_desc req_desc, resp_desc;
+	struct ipa_mhi_cleanup_resp_msg_v01 resp;
+	int rc;
+
+	memset(&resp, 0, sizeof(resp));
+
+	IPAWANDBG("Sending QMI_IPA_MHI_CLEANUP_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+
+	req_desc.max_msg_len = IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_IPA_MHI_CLEANUP_REQ_V01;
+	req_desc.ei_array = ipa_mhi_cleanup_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_IPA_MHI_CLEANUP_RESP_V01;
+	resp_desc.ei_array = ipa_mhi_cleanup_resp_msg_v01_ei;
+
+	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
+		&req_desc, req,
+		&resp_desc, &resp,
+		QMI_MHI_SEND_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_MHI_CLEANUP_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_MHI_CLEANUP_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_mhi_cleanup_req_msg");
+}
+
+int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req)
+{
+	IPAWANDBG("Sending QMI_IPA_ENDP_DESC_INDICATION_V01\n");
+
+	if (unlikely(!ipa3_svc_handle))
+		return -ETIMEDOUT;
+
+	return qmi_send_indication(ipa3_svc_handle,
+		&ipa3_qmi_ctx->client_sq,
+		QMI_IPA_ENDP_DESC_INDICATION_V01,
+		IPA_ENDP_DESC_INDICATION_MSG_V01_MAX_MSG_LEN,
+		ipa_endp_desc_indication_msg_v01_ei,
+		req);
+}
+
+void ipa3_qmi_init(void)
+{
+	mutex_init(&ipa3_qmi_lock);
+}
+
+void ipa3_qmi_cleanup(void)
+{
+	mutex_destroy(&ipa3_qmi_lock);
+}
+

+ 540 - 0
ipa/ipa_v3/ipa_qmi_service.h

@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <linux/soc/qcom/qmi.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * Names of the A7 QMAP header and of the DL WWAN default routing table
+ * (the latter is used for both v4 and v6).
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define MAX_NUM_QMI_MPM_AGGR_CACHE 3
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_LOCAL_MODEM "modem"
+#define SUBSYS_REMOTE_MODEM "esoc0"
+
+
+#define IPAWANDBG(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+	} while (0)
+
+#define IPAWANDBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+	} while (0)
+
+#define IPAWANERR(fmt, args...) \
+	do { \
+		pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+	} while (0)
+
+#define IPAWANERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+	} while (0)
+
+#define IPAWANINFO(fmt, args...) \
+	do { \
+		pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+	} while (0)
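For reference, with the argument lists above now passing __func__ and
__LINE__ to the IPC log as well (matching the "%s:%d" in the format string),
a call such as IPAWANDBG("rc=%d\n", rc) expands, schematically, to:

	pr_debug("ipa-wan" " %s:%d " "rc=%d\n", __func__, __LINE__, rc);
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf(),
			"ipa-wan" " %s:%d " "rc=%d\n", __func__, __LINE__, rc);
	IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(),
			"ipa-wan" " %s:%d " "rc=%d\n", __func__, __LINE__, rc);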
+
+extern struct ipa3_qmi_context *ipa3_qmi_ctx;
+
+struct ipa_offload_connection_val {
+	enum ipa_ip_type_enum_v01 ip_type;
+	bool valid;
+	uint32_t rule_id;
+	uint32_t  rule_hdl;
+};
+
+struct ipa3_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_install_fltr_rule_req_ex_msg;
+	struct ipa_install_fltr_rule_req_ex_msg_v01
+		ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_configure_ul_firewall_rules_req_msg;
+	struct ipa_configure_ul_firewall_rules_req_msg_v01
+		ipa_configure_ul_firewall_rules_req_msg_cache
+			[MAX_NUM_QMI_RULE_CACHE];
+	struct ipa_mhi_prime_aggr_info_req_msg_v01
+		ipa_mhi_prime_aggr_info_req_msg_cache
+			[MAX_NUM_QMI_MPM_AGGR_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+	struct sockaddr_qrtr client_sq;
+	struct sockaddr_qrtr server_sq;
+	int num_ipa_offload_connection;
+	struct ipa_offload_connection_val
+		ipa_offload_cache[QMI_IPA_MAX_FILTERS_V01];
+	uint8_t ul_firewall_indices_list_valid;
+	uint32_t ul_firewall_indices_list_len;
+	uint32_t ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
+};
+
+struct ipa3_rmnet_mux_val {
+	uint32_t  mux_id;
+	int8_t    vchannel_name[IFNAMSIZ];
+	bool mux_channel_set;
+	bool ul_flt_reg;
+	bool mux_hdr_set;
+	uint32_t  hdr_hdl;
+};
+
+extern struct qmi_elem_info
+	ipa3_init_modem_driver_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_indication_reg_req_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
+
+extern struct qmi_elem_info
+	ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
+
+extern struct qmi_elem_info
+	ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+
+extern struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+
+extern struct qmi_elem_info
+	ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+
+extern struct qmi_elem_info
+	ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct qmi_elem_info
+	ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
+extern struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[];
+
+extern struct qmi_elem_info ipa_endp_desc_indication_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_prime_aggr_info_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_mhi_prime_aggr_info_resp_msg_v01_ei[];
+extern struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_add_offload_connection_resp_msg_v01_ei[];
+extern struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[];
+extern struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[];
+
+/**
+ * struct ipa3_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa3_rmnet_context {
+	bool ipa_rmnet_ssr;
+	u64 polling_interval;
+	u32 metered_mux_id;
+};
+
+extern struct ipa3_rmnet_context ipa3_rmnet_ctx;
+
+#if IS_ENABLED(CONFIG_RMNET_IPA3)
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa3_qmi_service_exit(void);
+
+/* sending filter-install-request to modem */
+int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+int ipa3_qmi_filter_request_ex_send(
+	struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
+
+int ipa3_qmi_add_offload_request_send(
+	struct ipa_add_offload_connection_req_msg_v01 *req);
+
+int ipa3_qmi_rmv_offload_request_send(
+	struct ipa_remove_offload_connection_req_msg_v01 *req);
+
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem */
+int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
+		*req);
+
+/* voting for bus BW to ipa_rm */
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps);
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+	*rule_req);
+
+int ipa3_wan_ioctl_init(void);
+
+void ipa3_wan_ioctl_stop_qmi_messages(void);
+
+void ipa3_wan_ioctl_enable_qmi_messages(void);
+
+void ipa3_wan_ioctl_deinit(void);
+
+void ipa3_qmi_stop_workqueues(void);
+
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
+		*data);
+
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type);
+
+int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+	*data);
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset);
+
+int rmnet_ipa3_query_tethering_stats_all(
+	struct wan_ioctl_query_tether_stats_all *data);
+
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+	struct wan_ioctl_query_per_client_stats *data);
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa3_qmi_set_aggr_info(
+	enum ipa_aggr_enum_type_v01 aggr_enum_type);
+
+int ipa3_qmi_stop_data_qouta(void);
+
+void ipa3_q6_handshake_complete(bool ssr_bootup);
+
+int ipa3_wwan_set_modem_perf_profile(int throughput);
+
+int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp);
+
+int ipa3_qmi_send_mhi_ready_indication(
+	struct ipa_mhi_ready_indication_msg_v01 *req);
+
+int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req);
+
+int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req);
+
+void ipa3_qmi_init(void);
+
+void ipa3_qmi_cleanup(void);
+
+int ipa3_wwan_init(void);
+
+void ipa3_wwan_cleanup(void);
+
+#else /* IS_ENABLED(CONFIG_RMNET_IPA3) */
+
+static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem */
+static inline int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_add_offload_request_send(
+	struct ipa_add_offload_connection_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_rmv_offload_request_send(
+	struct ipa_remove_offload_connection_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_filter_request_ex_send(
+	struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem */
+static inline int ipa3_qmi_filter_notify_send(
+	struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_copy_ul_filter_rule_to_ipa(
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wan_ioctl_init(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_deinit(void) { }
+
+static inline void ipa3_qmi_stop_workqueues(void) { }
+
+static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_poll_tethering_stats(
+	struct wan_ioctl_poll_tethering_stats *data)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_set_data_quota(
+	struct wan_ioctl_set_data_quota *data)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type) { }
+
+static inline int ipa3_qmi_get_data_stats(
+	struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_network_stats(
+	struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_set_data_quota(
+	struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_stop_data_qouta(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+
+static inline int ipa3_qmi_send_mhi_ready_indication(
+	struct ipa_mhi_ready_indication_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_send_rsc_pipe_indication(
+	struct ipa_endp_desc_indication_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_send_mhi_cleanup_request(
+	struct ipa_mhi_cleanup_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wwan_set_modem_perf_profile(
+	int throughput)
+{
+	return -EPERM;
+}
+static inline int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_set_aggr_info(
+	enum ipa_aggr_enum_type_v01 aggr_enum_type)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_qmi_init(void) { }
+
+static inline void ipa3_qmi_cleanup(void) { }
+
+static inline int ipa3_wwan_init(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_wwan_cleanup(void) { }
+
+#endif /* IS_ENABLED(CONFIG_RMNET_IPA3) */
+
+#endif /* IPA_QMI_SERVICE_H */

+ 5110 - 0
ipa/ipa_v3/ipa_qmi_service_v01.c

@@ -0,0 +1,5110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa_qmi_service.h"
+
+/* Type Definitions */
+static struct qmi_elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
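Tables like the one above are walked by the kernel QMI core
(drivers/soc/qcom/qmi_encdec.c) to marshal the C struct into TLVs. A minimal
sketch of the transaction pattern that ipa3_qmi_send_req_wait() presumably
wraps, using the standard linux/soc/qcom/qmi.h API; `handle', `sq', `msg_id',
`max_msg_len', `timeout_ms', the ei arrays and the req/resp objects are
placeholders for whatever the caller owns:

	struct qmi_txn txn;
	int rc;

	/* bind the response decode table before sending */
	rc = qmi_txn_init(handle, &txn, resp_ei, &resp);
	if (rc < 0)
		return rc;

	/* encode req through req_ei and transmit it over QRTR */
	rc = qmi_send_request(handle, &sq, &txn, msg_id,
			      max_msg_len, req_ei, &req);
	if (rc < 0) {
		qmi_txn_cancel(&txn);
		return rc;
	}

	/* block until the matching response arrives or times out */
	rc = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms));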
+
+static struct qmi_elem_info ipa3_route_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					route_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					num_indices),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_modem_mem_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					block_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_low),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_high),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_32_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_16_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_filter_rule_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			rule_eq_bitmap),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			tos_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tos_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_range_16),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_range_16),
+		.ei_array	= ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16),
+		.ei_array	= ipa3_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32),
+		.ei_array	= ipa3_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_128),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	=
+			QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.array_type	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			offset_meq_128),
+		.ei_array	= ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ipv4_frag_eq_present),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
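The recurring pattern in the table above -- a num_* count byte immediately
followed by a STATIC_ARRAY of equation structs -- is how a variable number of
filter equations rides inside a fixed-size QMI struct: the full array is
marshalled on the wire and the count tells the peer how many slots are
meaningful. A sketch of filling one 16-bit range equation (the offset and
port values are illustrative only):

	struct ipa_filter_rule_type_v01 rule;

	memset(&rule, 0, sizeof(rule));
	rule.rule_eq_bitmap = 0;			/* set per equations used */
	rule.num_ihl_offset_range_16 = 1;		/* one valid slot */
	rule.ihl_offset_range_16[0].offset = 0;		/* IHL-relative offset */
	rule.ihl_offset_range_16[0].range_low = 1024;
	rule.ihl_offset_range_16[0].range_high = 2048;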
+
+static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_filter_spec_ex_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					rule_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_rule_hashable),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct
+qmi_elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_index),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_hdr_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info),
+		.ei_array	= ipa3_hdr_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_modem_mem_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info),
+		.ei_array	= ipa3_modem_mem_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info),
+		.ei_array	= ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_zip_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info),
+		.ei_array	= ipa3_zip_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1F,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_stats_quota_base_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1F,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_stats_quota_base_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x20,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_stats_quota_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x20,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_stats_quota_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x21,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_drop_stats_base_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x21,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_drop_stats_base_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x22,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_drop_stats_table_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x22,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hw_drop_stats_table_size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
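Every optional field in the request above is an OPT_FLAG/value pair sharing
one tlv_type (0x10..0x22); the encoder emits a TLV only when the matching
*_valid byte is nonzero. A minimal sketch, with an illustrative
platform-type value:

	struct ipa_init_modem_driver_req_msg_v01 req;

	memset(&req, 0, sizeof(req));
	req.platform_type_valid = 1;	/* emit TLV 0x10 */
	req.platform_type = QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01;
	/* TLVs 0x11..0x22 stay off the wire while their *_valid
	 * flags remain zero.
	 */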
+
+struct qmi_elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01,
+			status),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached),
+	},
+	/*
+	 * Each QMI_OPT_FLAG element must immediately precede the value
+	 * element it gates.
+	 */
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			ipa_mhi_ready_ind_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			ipa_mhi_ready_ind),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			endpoint_desc_ind_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			endpoint_desc_ind),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_indication_reg_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_master_driver_init_complt_ind_msg_v01,
+			master_driver_init_status),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_filter_rule_req2_type_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   rule_eq_bitmap),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   pure_ack_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   pure_ack_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   protocol_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   protocol_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_ihl_offset_range_16),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size      = sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_range_16),
+		.ei_array      = ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_offset_meq_32),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   offset_meq_32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   tc_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   tc_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   flow_eq_present),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   flow_eq),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_16_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_16),
+		.ei_array      = ipa3_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_32_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_eq_32),
+		.ei_array      = ipa3_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_ihl_offset_meq_32),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ihl_offset_meq_32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   num_offset_meq_128),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size      = sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.array_type       = STATIC_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   offset_meq_128),
+		.ei_array      = ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   metadata_meq32_present),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   metadata_meq32),
+		.ei_array      = ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_rule_req2_type_v01,
+					   ipv4_frag_eq_present),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
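+/*
+ * Element-info table for a single ipa_filter_spec_ex2_type_v01. Message
+ * tables below reference it via .ei_array; the QMI encoder walks it once
+ * per array element, striding through the source buffer by the parent
+ * entry's .elem_size, so that size must match this struct.
+ */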
+static struct qmi_elem_info ipa_filter_spec_ex2_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ip_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   ip_type),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct ipa_filter_rule_req2_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   filter_rule),
+		.ei_array      = ipa_filter_rule_req2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_filter_action_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   filter_action),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_routing_table_index_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   route_table_index),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_mux_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   mux_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   rule_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_filter_spec_ex2_type_v01,
+					   is_rule_hashable),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list),
+		.ei_array	= ipa_filter_spec_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_ex_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list),
+		.ei_array	= ipa_filter_spec_ex_type_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			ul_firewall_indices_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
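+/*
+ * Response tables start with the standard QMI result TLV: tlv_type 0x02
+ * maps to struct qmi_response_type_v01 (result/error codes) and is decoded
+ * through the common qmi_response_type_v01_ei table; optional payload TLVs
+ * follow from 0x10 upward.
+ */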
+struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list),
+		.ei_array	=
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			install_status),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_handle_to_index_map_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list),
+		.ei_array	= ipa3_filter_handle_to_index_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_EX2_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_ex),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			source_pipe_bitmask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info
+	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
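+/*
+ * Pattern used throughout this table: every optional field is a pair of
+ * elements sharing one tlv_type, a QMI_OPT_FLAG entry for the u8
+ * <field>_valid member followed by the value entry. The flag is never put
+ * on the wire; it only tells the encoder whether to emit that TLV.
+ */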
+struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_config_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
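+/*
+ * The tables below describe aggregate members rather than whole messages:
+ * their entries use QMI_COMMON_TLV_TYPE because members of a nested
+ * QMI_STRUCT are packed back to back with no per-member TLV header. In the
+ * message tables, such structs appear as QMI_DATA_LEN/VAR_LEN_ARRAY pairs:
+ * the DATA_LEN element carries the <list>_len count and the array element
+ * encodes that many entries, up to elem_len.
+ */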
+static struct qmi_elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					filter_rule_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					num_packets),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct
+					ipa_stats_type_filter_rule_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list),
+		.ei_array	= ipa3_stats_type_filter_rule_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_apn_data_stats_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list),
+		.ei_array	= ipa3_apn_data_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					num_Mbytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01,
+			apn),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+	/* ipa_stop_data_usage_quota_req_msg is empty */
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_stop_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
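+/*
+ * Illustrative use of these tables (a sketch, not code from this driver;
+ * the handle/address variable names and the message-ID and max-length
+ * macros are assumptions for the example, the real values live in the
+ * service header):
+ *
+ *	struct ipa_install_fltr_rule_req_msg_v01 req = { 0 };
+ *	struct ipa_install_fltr_rule_resp_msg_v01 resp = { 0 };
+ *	struct qmi_txn txn;
+ *
+ *	qmi_txn_init(qmi_handle, &txn,
+ *		     ipa3_install_fltr_rule_resp_msg_data_v01_ei, &resp);
+ *	qmi_send_request(qmi_handle, &server_sq, &txn,
+ *			 QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+ *			 QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+ *			 ipa3_install_fltr_rule_req_msg_data_v01_ei, &req);
+ *	qmi_txn_wait(&txn, msecs_to_jiffies(5000));
+ */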
+struct qmi_elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_EX_V01,
+		.elem_size	= sizeof(struct
+					ipa_filter_spec_ex_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex_list),
+		.ei_array	= ipa_filter_spec_ex_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			source_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_EX_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+			struct ipa_install_fltr_rule_req_ex_msg_v01,
+			ul_firewall_indices_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_ex_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_ex_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_ex_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_EX_V01,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_ex_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			client_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			src_pipe_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_ul_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_ul_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_dl_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_dl_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_ul_ipv4_pkts),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_ul_ipv6_pkts),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_dl_ipv4_pkts),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_per_client_stats_info_type_v01,
+			num_dl_ipv6_pkts),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ul_firewall_rule_type_v01,
+			ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ul_firewall_rule_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ul_firewall_config_result_type_v01,
+			is_success),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ul_firewall_config_result_type_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_enable_per_client_stats_req_msg_v01,
+			enable_per_client_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_per_client_stats_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			client_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			src_pipe_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PER_CLIENTS_V01,
+		.elem_size	=
+			sizeof(struct ipa_per_client_stats_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list),
+		.ei_array	=
+			ipa3_per_client_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+		.elem_size	= sizeof(struct ipa_ul_firewall_rule_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list),
+		.ei_array	=
+			ipa3_ul_firewall_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x2,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+			resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_ul_firewall_config_result_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+			result),
+		.ei_array	=
+		ipa3_ul_firewall_config_result_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_mhi_ch_init_info_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_init_info_type_v01,
+		ch_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_init_info_type_v01,
+		er_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_init_info_type_v01,
+		ch_doorbell_addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_init_info_type_v01,
+		er_doorbell_addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_init_info_type_v01,
+		direction_type),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_mhi_smmu_info_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_smmu_info_type_v01,
+		iova_ctl_base_addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_smmu_info_type_v01,
+		iova_ctl_size),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_smmu_info_type_v01,
+		iova_data_base_addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_smmu_info_type_v01,
+		iova_data_size),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct ipa_mhi_ready_indication_msg_v01,
+		ch_info_arr_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_ch_init_info_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct ipa_mhi_ready_indication_msg_v01,
+		ch_info_arr),
+		.ei_array = ipa_mhi_ch_init_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_ready_indication_msg_v01,
+		smmu_info_valid),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct ipa_mhi_smmu_info_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_ready_indication_msg_v01,
+		smmu_info),
+		.ei_array = ipa_mhi_smmu_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01,
+		pa),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01,
+		iova),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_mem_addr_info_type_v01,
+		size),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		ch_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_2_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u16),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		poll_cfg),
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len = 1,
+		.elem_size = sizeof(enum ipa_mhi_brst_mode_enum_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		brst_mode_type),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		ring_iova),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		ring_len),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		rp),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_tr_info_type_v01,
+		wp),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		er_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		intmod_cycles),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		intmod_count),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		msi_addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		ring_iova),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		ring_len),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		rp),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_er_info_type_v01,
+		wp),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		tr_info_arr_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_tr_info_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		tr_info_arr),
+		.ei_array = ipa_mhi_tr_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		er_info_arr_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_er_info_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		er_info_arr),
+		.ei_array = ipa_mhi_er_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x03,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		ctrl_addr_map_info_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x03,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		ctrl_addr_map_info),
+		.ei_array = ipa_mhi_mem_addr_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x04,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		data_addr_map_info_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_mem_addr_info_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x04,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_req_msg_v01,
+		data_addr_map_info),
+		.ei_array = ipa_mhi_mem_addr_info_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01,
+		ch_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0,
+		.offset = offsetof(struct ipa_mhi_ch_alloc_resp_type_v01,
+		is_success),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01,
+		resp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01,
+		alloc_resp_arr_valid),
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01,
+		alloc_resp_arr_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01,
+		.elem_size = sizeof(struct ipa_mhi_ch_alloc_resp_type_v01),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_alloc_channel_resp_msg_v01,
+		alloc_resp_arr),
+		.ei_array = ipa_mhi_ch_alloc_resp_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct ipa_mhi_clk_vote_req_msg_v01,
+		mhi_vote),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			tput_value_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			tput_value),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			clk_rate_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_mhi_clk_vote_req_msg_v01,
+			clk_rate),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ipa_mhi_clk_vote_resp_msg_v01,
+		resp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01,
+		cleanup_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct ipa_mhi_cleanup_req_msg_v01,
+		cleanup),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ipa_mhi_cleanup_resp_msg_v01,
+		resp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_ep_id_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ic_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ic_type),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_desc_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_id),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_status_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct ipa_ep_id_type_v01,
+					   ep_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_endp_desc_indication_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_ENDP_DESC_NUM_MAX_V01,
+		.elem_size      = sizeof(struct ipa_ep_id_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			ep_info),
+		.ei_array      = ipa_ep_id_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			num_eps_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_endp_desc_indication_msg_v01,
+			num_eps),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info ipa_mhi_prime_aggr_info_type_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ic_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			ic_type),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_ep_desc_type_enum_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			ep_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			bytes_count),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			pkt_count),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum ipa_aggr_enum_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_type_v01,
+			aggr_type),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_prime_aggr_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_ENDP_DESC_NUM_MAX_V01,
+		.elem_size      = sizeof(
+			struct ipa_mhi_prime_aggr_info_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			aggr_info),
+		.ei_array      = ipa_mhi_prime_aggr_info_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			num_eps_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_req_msg_v01,
+			num_eps),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_mhi_prime_aggr_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_mhi_prime_aggr_info_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(u32),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(struct ipa_filter_spec_ex2_type_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			filter_spec_ex2_list),
+		.ei_array      = ipa_filter_spec_ex2_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			default_mhi_path_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_req_msg_v01,
+			default_mhi_path),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_add_offload_connection_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_add_offload_connection_resp_msg_v01,
+			filter_handle_list),
+		.ei_array      =
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_IPA_MAX_FILTERS_V01,
+		.elem_size      = sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.array_type       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			filter_handle_list),
+		.ei_array      =
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			clean_all_rules_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_req_msg_v01,
+			clean_all_rules),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_resp_msg_v01,
+			resp_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+			struct ipa_remove_offload_connection_resp_msg_v01,
+			resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
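
Taken together, these qmi_elem_info tables are the encode/decode schemas the kernel QMI core walks to turn the request/response C structs above into TLV wire format and back. A minimal sketch of how one pair is typically consumed, assuming a client qmi_handle and server address already exist; the message ID 0x53, length 128, and 5-second timeout are illustrative placeholders, not values from this patch:

	#include <linux/soc/qcom/qmi.h>
	#include <linux/jiffies.h>

	/* Hypothetical sender: encode `req` with the request ei table, then
	 * let the QMI core decode the reply into `resp` via the response one.
	 */
	static int ipa_mhi_clk_vote_example(struct qmi_handle *qmi,
		struct sockaddr_qrtr *sq)
	{
		struct ipa_mhi_clk_vote_req_msg_v01 req = { .mhi_vote = 1 };
		struct ipa_mhi_clk_vote_resp_msg_v01 resp;
		struct qmi_txn txn;
		int ret;

		/* bind the transaction to the response schema for decode */
		ret = qmi_txn_init(qmi, &txn,
			ipa_mhi_clk_vote_resp_msg_v01_ei, &resp);
		if (ret < 0)
			return ret;

		/* 0x53/128 stand in for the real QMI_IPA_* msg-id/len macros */
		ret = qmi_send_request(qmi, sq, &txn, 0x53, 128,
			ipa_mhi_clk_vote_req_msg_v01_ei, &req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			return ret;
		}

		return qmi_txn_wait(&txn, msecs_to_jiffies(5000));
	}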

+ 2518 - 0
ipa/ipa_v3/ipa_rt.c

@@ -0,0 +1,2518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/idr.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED	(-1)
+
+#define IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC 6
+
+#define IPA_RT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa_generate_rt_hw_rule() - Generate a single RT H/W rule
+ *  This func does the core driver preparation work and then calls
+ *  the HAL layer for the real work.
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer. buf == NULL means the
+ *	caller only wants to know the size of the rule as seen
+ *	by HW, so no valid buffer was passed; a scratch
+ *	buffer is used instead.
+ *	With this scheme the rule is generated
+ *	twice: once into the scratch buffer to learn its size,
+ *	and a second time to write the rule into the caller-supplied
+ *	buffer, which is of the required size
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+	struct ipa3_rt_entry *entry, u8 *buf)
+{
+	struct ipahal_rt_rule_gen_params gen_params;
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	if (entry->rule.hashable &&
+		entry->rule.attrib.attrib_mask & IPA_FLT_IS_PURE_ACK) {
+		IPAERR_RL("PURE_ACK rule attrib used with hash rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EPERM;
+	}
+
+	gen_params.ipt = ip;
+	gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+	if (gen_params.dst_pipe_idx == -1) {
+		IPAERR_RL("Wrong destination pipe specified in RT rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EPERM;
+	}
+	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+		IPAERR_RL("No RT rule on IPA_client_producer pipe.\n");
+		IPAERR_RL("pipe_idx: %d dst_pipe: %d\n",
+				gen_params.dst_pipe_idx, entry->rule.dst);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EPERM;
+	}
+
+	/* Check whether the header entry is still
+	 * present in the header table
+	 */
+
+	if (entry->hdr) {
+		hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+			IPAERR_RL("Header entry already deleted\n");
+			return -EPERM;
+		}
+	} else if (entry->proc_ctx) {
+		hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+		if (!hdr_proc_entry ||
+			hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+			IPAERR_RL("Proc header entry already deleted\n");
+			return -EPERM;
+		}
+	}
+
+	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+
+		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+		if ((proc_ctx == NULL) ||
+			ipa3_check_idr_if_freed(proc_ctx) ||
+			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+			gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+			gen_params.hdr_ofst = 0;
+		} else {
+			gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl;
+			gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+			gen_params.hdr_ofst = proc_ctx->offset_entry->offset +
+				ipa3_ctx->hdr_proc_ctx_tbl.start_offset;
+		}
+	} else if ((entry->hdr != NULL) &&
+		(entry->hdr->cookie == IPA_HDR_COOKIE)) {
+		gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
+	} else {
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+		gen_params.hdr_ofst = 0;
+	}
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_rt_rule_i *)&entry->rule;
+	gen_params.cnt_idx = entry->cnt_idx;
+
+	res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate rt h/w rule\n");
+
+	return res;
+}
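
The NULL-buf contract documented above implies the usual two-pass pattern: a sizing call that leaves the length in entry->hw_len, then a second call into a buffer of exactly that length. A hypothetical caller, within this file's context, might look like the sketch below; the real driver sizes the rules in ipa_prep_rt_tbl_for_cmt() and writes them in ipa_translate_rt_tbl_to_hw_fmt():

	/* Sketch only: pass 1 fills entry->hw_len via the scratch buffer,
	 * pass 2 emits the rule into a buffer of that exact size. */
	static u8 *build_one_rt_rule(enum ipa_ip_type ip,
		struct ipa3_rt_entry *entry)
	{
		u8 *buf;

		if (ipa_generate_rt_hw_rule(ip, entry, NULL))	/* size pass */
			return NULL;

		buf = kzalloc(entry->hw_len, GFP_KERNEL);
		if (!buf)
			return NULL;

		if (ipa_generate_rt_hw_rule(ip, entry, buf)) {	/* write pass */
			kfree(buf);
			return NULL;
		}
		return buf;
	}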
+
+/**
+ * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures
+ *  (rules and tables) to HW format and fill them into the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram (for local body usage)
+ * @apps_start_idx: the first rt table index of apps tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr,
+	u32 body_ofst, u32 apps_start_idx)
+{
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa_mem_buffer tbl_mem;
+	u8 *tbl_mem_buf;
+	struct ipa3_rt_entry *entry;
+	int res;
+	u64 offset;
+	u8 *body_i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	body_i = base;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->sz[rlt] == 0)
+			continue;
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR_RL("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, tbl->idx - apps_start_idx, true)) {
+				IPAERR_RL("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					tbl_mem_buf);
+				if (res) {
+					IPAERR_RL("failed to gen HW RT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				tbl->idx - apps_start_idx, true)) {
+				IPAERR_RL("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					body_i);
+				if (res) {
+					IPAERR_RL("failed to gen HW RT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment, as
+			 * local tables are ordered back-to-back
+			 */
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
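
The body_i arithmetic at the end of the local-table branch is the round-up-to-boundary idiom, which is only correct if ipahal_get_lcl_tbl_addr_alignment() returns a mask, i.e. the alignment minus one; that reading is an assumption here, but it is the only one consistent with the AND-NOT that follows. In stand-alone form:

	/* Round p up to the next power-of-two boundary; with mask = align - 1
	 * this is exactly the body_i sequence: p += mask; p &= ~mask; */
	static inline unsigned long align_up(unsigned long p,
		unsigned long align)
	{
		unsigned long mask = align - 1;	/* assumes align is 2^n */

		return (p + mask) & ~mask;
	}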
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *next;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			if (tbl->prev_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->prev_mem[i]);
+				memset(&tbl->prev_mem[i], 0,
+					sizeof(tbl->prev_mem[i]));
+			}
+		}
+	}
+
+	set = &ipa3_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			WARN_ON(tbl->prev_mem[i].phys_base != 0);
+			if (tbl->curr_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->curr_mem[i]);
+			}
+		}
+		list_del(&tbl->link);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+	}
+}
+
+/**
+ * ipa_prep_rt_tbl_for_cmt() - prepare the rt table for commit:
+ *  assign priorities to the rules, calculate their sizes, and compute
+ *  the overall table size
+ * @ip: the ip address family type
+ * @tbl: the rt tbl to be prepared
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_rt_tbl *tbl)
+{
+	struct ipa3_rt_entry *entry;
+	int prio_i;
+	int res;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
+	prio_i = max_prio;
+	list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		res = ipa_generate_rt_hw_rule(ip, entry, NULL);
+		if (res) {
+			IPAERR_RL("failed to calculate HW RT rule size\n");
+			return -EPERM;
+		}
+
+		IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
+			entry->id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		WARN_ON_RATELIMIT_IPA(1);
+		IPAERR_RL("rt tbl %s has zero total size\n", tbl->name);
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
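
A quick worked example of the priority scheme above, assuming the HAL maximum is 10 and priorities count downward: for three rules where only the second sets max_prio, the loop assigns 9, 10, 8 — max_prio rules share the maximum and do not consume a slot from the descending counter. As a self-contained sketch, where RULE_MAX_PRIO stands in for ipahal_get_rule_max_priority() and the bare decrement for ipahal_rule_decrease_priority() without its bounds check:

	#include <stdbool.h>
	#include <stdio.h>

	#define RULE_MAX_PRIO 10

	int main(void)
	{
		bool wants_max[] = { false, true, false };
		int prio_i = RULE_MAX_PRIO;
		int i;

		for (i = 0; i < 3; i++) {
			/* max_prio rules share the maximum; others descend */
			int prio = wants_max[i] ? RULE_MAX_PRIO : --prio_i;

			printf("rule %d -> prio %d\n", i, prio); /* 9, 10, 8 */
		}
		return 0;
	}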
+
+/**
+ * ipa_generate_rt_hw_tbl_img() - generate the rt hw tbl images.
+ *  headers and local bodies are created in buffers that will later be
+ *  copied into local memory (sram); sys bodies stay in system memory
+ * @ip: the ip address family type
+ * @alloc_params: IN/OUT parameters holding info regarding the tables'
+ *  headers and bodies on DDR (DMA buffers), and the info the HAL needs
+ *  for the allocation
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	u32 apps_start_idx;
+	int rc = 0;
+
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) -
+			IPA_MEM_PART(v4_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) -
+			IPA_MEM_PART(v4_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) -
+			IPA_MEM_PART(v6_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) -
+			IPA_MEM_PART(v6_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_fail;
+	}
+
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate non-hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_fail:
+	return rc;
+}
+
+/**
+ * ipa_rt_valid_lcl_tbl_size() - validate if the space allocated for rt tbl
+ *  bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ * @bdy: the table body buffer whose size is checked against the sram space
+ *
+ * Return: true if enough space available or false in other cases
+ */
+static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_rt_hash_size) :
+			IPA_MEM_PART(apps_v4_rt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_rt_hash_size) :
+			IPA_MEM_PART(apps_v6_rt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+		bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * __ipa_commit_rt_v3() - commit rt tables to the hw
+ * commit the headers, and the bodies if they are local, with internal cache
+ * flushing
+ * @ip: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_rt_v3(enum ipa_ip_type ip)
+{
+	struct ipa3_desc desc[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC];
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem  mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld
+		*cmd_pyld[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC];
+	int num_cmd = 0;
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	u32 num_modem_rt_index;
+	int rc = 0;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	int i;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	u32 tbl_hdr_width;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+
+	if (ip == IPA_IP_v4) {
+		num_modem_rt_index =
+			IPA_MEM_PART(v4_modem_rt_index_hi) -
+			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
+			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+	} else {
+		num_modem_rt_index =
+			IPA_MEM_PART(v6_modem_rt_index_hi) -
+			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
+			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+	}
+
+	if (!ipa3_ctx->rt_idx_bitmap[ip]) {
+		IPAERR("no rt tbls present\n");
+		rc = -EPERM;
+		goto no_rt_tbls;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
+			rc = -EPERM;
+			goto no_rt_tbls;
+		}
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR("fail to generate RT HW TBL images. IP %d\n", ip);
+		rc = -EFAULT;
+		goto no_rt_tbls;
+	}
+
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("failed to construct coal close IC\n");
+			rc = -EFAULT;
+			goto fail_size_valid;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+	}
+
+	/*
+	 * If SRAM memory is not allocated to hash tables, sending a
+	 * command for hash table (filter/routing) operations is not
+	 * supported.
+	 */
+	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
+		/* flushing ipa internal hashable rt rules cache */
+		memset(&flush, 0, sizeof(flush));
+		if (ip == IPA_IP_v4)
+			flush.v4_rt = true;
+		else
+			flush.v6_rt = true;
+		ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+		reg_write_cmd.skip_pipeline_clear = false;
+		reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_cmd.offset = ipahal_get_reg_ofst(
+					IPA_FILT_ROUT_HASH_FLUSH);
+		reg_write_cmd.value = valmask.val;
+		reg_write_cmd.value_mask = valmask.mask;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd,
+							false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+			"fail construct register_write imm cmd. IP %d\n", ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		num_cmd++;
+	}
+
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = alloc_params.nhash_hdr.size;
+	mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base;
+	mem_cmd.local_addr = lcl_nhash_hdr;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	num_cmd++;
+
+	/*
+	 * If SRAM memory is not allocated to hash tables, sending a
+	 * command for hash table (filter/routing) operations is not
+	 * supported.
+	 */
+	if (!ipa3_ctx->ipa_fltrt_not_hashable) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_hdr.size;
+		mem_cmd.system_addr = alloc_params.hash_hdr.phys_base;
+		mem_cmd.local_addr = lcl_hash_hdr;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+			"fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		num_cmd++;
+	}
+
+	if (lcl_nhash) {
+		if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) {
+			IPAERR("number of commands is out of range: IP = %d\n",
+				ip);
+			rc = -ENOBUFS;
+			goto fail_imm_cmd_construct;
+		}
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		num_cmd++;
+	}
+	if (lcl_hash) {
+		if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) {
+			IPAERR("number of commands is out of range: IP = %d\n",
+				ip);
+			rc = -ENOBUFS;
+			goto fail_imm_cmd_construct;
+		}
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		num_cmd++;
+	}
+
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR_RL("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG_LOW("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG_LOW("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG_LOW("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG_LOW("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+no_rt_tbls:
+	return rc;
+}
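
The commit path above batches at most IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC immediate commands and bounds-checks only the two optional body copies, the last descriptors that can be queued. The general shape of that bounded-batch pattern, as a minimal sketch with hypothetical names:

	#include <errno.h>

	#define MAX_CMDS 6	/* mirrors IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC */

	struct cmd_desc { const void *pyld; };

	/* Append one payload to a fixed-size descriptor batch, failing with
	 * -ENOBUFS just like the lcl_nhash/lcl_hash branches above. */
	static int queue_cmd(struct cmd_desc *desc, int *num_cmd,
		const void *pyld)
	{
		if (*num_cmd >= MAX_CMDS)
			return -ENOBUFS;
		desc[(*num_cmd)++].pyld = pyld;
		return 0;
	}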
+
+/**
+ * __ipa3_find_rt_tbl() - find the routing table
+ *			whose name is given as a parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table with the given name, or NULL if it
+ * doesn't exist
+ */
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR_RL("Name too long: %s\n", name);
+		return NULL;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!ipa3_check_idr_if_freed(entry) &&
+			!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_query_rt_index() - find the index of the routing table whose name
+ *			and IP type are given as parameters
+ * @in:	[inout] the name and IP type of the wanted routing table; on
+ *		success, the table index is returned in @in->idx
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	struct ipa3_rt_tbl *entry;
+
+	if (in->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	in->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(in->ip, in->name);
+	if (!entry) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EFAULT;
+	}
+	in->idx  = entry->idx;
+	mutex_unlock(&ipa3_ctx->lock);
+	return 0;
+}
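+
+/*
+ * Example (illustrative only, not part of the driver): querying the index
+ * of a routing table by name. The table name "my_rt_tbl" is a hypothetical
+ * placeholder for a table created earlier through the add-rule path.
+ *
+ *	struct ipa_ioc_get_rt_tbl_indx in;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.ip = IPA_IP_v4;
+ *	strlcpy(in.name, "my_rt_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_query_rt_index(&in))
+ *		IPADBG("rt tbl index %d\n", in.idx);
+ */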
+
+static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+	int id;
+	int max_tbl_indx;
+
+	if (name == NULL) {
+		IPAERR_RL("no tbl name\n");
+		goto error;
+	}
+
+	if (ip == IPA_IP_v4) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v4_modem_rt_index_hi),
+			IPA_MEM_PART(v4_apps_rt_index_hi));
+	} else if (ip == IPA_IP_v6) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v6_modem_rt_index_hi),
+			IPA_MEM_PART(v6_apps_rt_index_hi));
+	} else {
+		IPAERR_RL("bad ip family type\n");
+		goto error;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry)
+			goto error;
+
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+		if (i > max_tbl_indx) {
+			IPAERR("rt tbl index is above max\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_RT_TBL_COOKIE;
+		entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_hash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		set->tbl_cnt++;
+		entry->rule_ids = &set->rule_ids;
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		id = ipa3_id_alloc(entry);
+		if (id < 0) {
+			IPAERR_RL("failed to add to tree\n");
+			WARN_ON_RATELIMIT_IPA(1);
+			goto ipa_insert_failed;
+		}
+		entry->id = id;
+	}
+
+	return entry;
+ipa_insert_failed:
+	set->tbl_cnt--;
+	list_del(&entry->link);
+	idr_destroy(entry->rule_ids);
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+error:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
+{
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	u32 id;
+	struct ipa3_rt_tbl_set *rset;
+
+	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+		IPAERR_RL("bad params\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+	if (ipa3_id_find(id) == NULL) {
+		IPAERR_RL("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else {
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EPERM;
+	}
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+
+	entry->rule_ids = NULL;
+	if (entry->in_sys[IPA_RULE_HASHABLE] ||
+		entry->in_sys[IPA_RULE_NON_HASHABLE]) {
+		list_move(&entry->link, &rset->head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+	} else {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+	}
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+	return 0;
+}
+
+static int __ipa_rt_validate_rule_id(u16 rule_id)
+{
+	if (!rule_id)
+		return 0;
+
+	if ((rule_id < ipahal_get_rule_id_hi_bit()) ||
+		(rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
+		IPAERR_RL("Invalid rule_id provided 0x%x\n",
+			rule_id);
+		return -EPERM;
+	}
+
+	return 0;
+}
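+
+/*
+ * Worked example (illustrative): ipahal_get_rule_id_hi_bit() returns the
+ * value of the highest rule-id bit. Assuming, for instance, a return value
+ * of 0x200, a caller-supplied rule_id is accepted only when
+ * 0x200 <= rule_id < 0x3ff: the high bit must be set and the all-ones
+ * value ((0x200 << 1) - 1) is reserved. rule_id == 0 means "allocate an
+ * id from the table's idr" and is always accepted.
+ */
+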
+static int __ipa_rt_validate_hndls(const struct ipa_rt_rule_i *rule,
+				struct ipa3_hdr_entry **hdr,
+				struct ipa3_hdr_proc_ctx_entry **proc_ctx)
+{
+	int index;
+
+	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+		IPAERR_RL("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+		return -EPERM;
+	}
+
+	if (rule->hdr_hdl) {
+		*hdr = ipa3_id_find(rule->hdr_hdl);
+		if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
+			IPAERR_RL("rt rule does not point to valid hdr\n");
+			return -EPERM;
+		}
+	} else if (rule->hdr_proc_ctx_hdl) {
+		*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
+		if ((*proc_ctx == NULL) ||
+			((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
+			IPAERR_RL("rt rule does not point to valid proc ctx\n");
+			return -EPERM;
+		}
+	}
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 && rule->coalesce) {
+		IPAERR_RL("rt rule coalescing not supported on this HW\n");
+		return -EPERM;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (rule->enable_stats && rule->cnt_idx) {
+			if (!ipahal_is_rule_cnt_id_valid(rule->cnt_idx)) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu out of range\n",
+					rule->cnt_idx);
+				return -EPERM;
+			}
+			index = rule->cnt_idx - 1;
+			if (!ipa3_ctx->flt_rt_counters.used_hw[index]) {
+				IPAERR_RL(
+					"invalid cnt_idx %hhu not alloc by driver\n",
+					rule->cnt_idx);
+				return -EPERM;
+			}
+		}
+	} else {
+		if (rule->enable_stats) {
+			IPAERR_RL(
+				"enable_stats is not supported on ipa_hw_type %d\n",
+				ipa3_ctx->ipa_hw_type);
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
+		const struct ipa_rt_rule_i *rule,
+		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+		u16 rule_id, bool user)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!*entry)
+		goto error;
+
+	INIT_LIST_HEAD(&(*entry)->link);
+	(*(entry))->cookie = IPA_RT_RULE_COOKIE;
+	(*(entry))->rule = *rule;
+	(*(entry))->tbl = tbl;
+	(*(entry))->hdr = hdr;
+	(*(entry))->proc_ctx = proc_ctx;
+	if (rule_id) {
+		id = rule_id;
+		(*(entry))->rule_id_valid = 1;
+	} else {
+		id = ipa3_alloc_rule_id(tbl->rule_ids);
+		if (id < 0) {
+			IPAERR_RL("failed to allocate rule id\n");
+			WARN_ON_RATELIMIT_IPA(1);
+			goto alloc_rule_id_fail;
+		}
+	}
+	(*(entry))->rule_id = id;
+	(*(entry))->ipacm_installed = user;
+
+	if ((*(entry))->rule.coalesce &&
+		(*(entry))->rule.dst == IPA_CLIENT_APPS_WAN_CONS &&
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
+		(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
+
+	if (rule->enable_stats)
+		(*entry)->cnt_idx = rule->cnt_idx;
+	else
+		(*entry)->cnt_idx = 0;
+	return 0;
+
+alloc_rule_id_fail:
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
+		struct ipa3_rt_tbl *tbl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR_RL("failed to add to tree\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		goto ipa_insert_failed;
+	}
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+		tbl->idx, tbl->rule_cnt, entry->rule_id);
+	*rule_hdl = id;
+	entry->id = id;
+
+	return 0;
+
+ipa_insert_failed:
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+	idr_remove(tbl->rule_ids, entry->rule_id);
+	list_del(&entry->link);
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule_i *rule, u8 at_rear, u32 *rule_hdl,
+		u16 rule_id, bool user)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+	if (__ipa_rt_validate_rule_id(rule_id))
+		goto error;
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+		IPAERR_RL("failed adding rt tbl name = %s\n",
+			name ? name : "");
+		goto error;
+	}
+	/*
+	 * do not allow any rule to be added to the "default" routing
+	 * table
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+	    (tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
+		goto error;
+	}
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+		rule_id, user))
+		goto error;
+
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
+		const struct ipa_rt_rule_i *rule, u32 *rule_hdl,
+		struct ipa3_rt_entry **add_after_entry)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0, true))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+static void __ipa_convert_rt_rule_in(struct ipa_rt_rule rule_in,
+	struct ipa_rt_rule_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule) >
+			sizeof(struct ipa_rt_rule_i))) {
+		IPAERR_RL("invalid size in: %d size out: %d\n",
+			sizeof(struct ipa_rt_rule),
+			sizeof(struct ipa_rt_rule_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_i));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_rt_rule));
+}
+
+static void __ipa_convert_rt_rule_out(struct ipa_rt_rule_i rule_in,
+	struct ipa_rt_rule *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule) >
+			sizeof(struct ipa_rt_rule_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule),
+			sizeof(struct ipa_rt_rule_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule));
+	memcpy(rule_out, &rule_in, sizeof(struct ipa_rt_rule));
+}
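+
+/*
+ * Note on the converters above: struct ipa_rt_rule is assumed to be a
+ * layout-prefix of the extended struct ipa_rt_rule_i, so converting "in"
+ * zero-fills the larger struct and memcpy()s the smaller one over it,
+ * while converting "out" simply drops the extended fields. The size
+ * checks guard against a header change breaking that prefix assumption.
+ */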
+
+static void __ipa_convert_rt_mdfy_in(struct ipa_rt_rule_mdfy rule_in,
+	struct ipa_rt_rule_mdfy_i *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule_mdfy) >
+			sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule_mdfy),
+			sizeof(struct ipa_rt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_mdfy_i));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_rt_rule));
+	rule_out->rt_rule_hdl = rule_in.rt_rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
+static void __ipa_convert_rt_mdfy_out(struct ipa_rt_rule_mdfy_i rule_in,
+	struct ipa_rt_rule_mdfy *rule_out)
+{
+	if (unlikely(sizeof(struct ipa_rt_rule_mdfy) >
+			sizeof(struct ipa_rt_rule_mdfy_i))) {
+		IPAERR_RL("invalid size in:%d size out:%d\n",
+			sizeof(struct ipa_rt_rule_mdfy),
+			sizeof(struct ipa_rt_rule_mdfy_i));
+		return;
+	}
+	memset(rule_out, 0, sizeof(struct ipa_rt_rule_mdfy));
+	memcpy(&rule_out->rule, &rule_in.rule,
+		sizeof(struct ipa_rt_rule));
+	rule_out->rt_rule_hdl = rule_in.rt_rule_hdl;
+	rule_out->status = rule_in.status;
+}
+
+/**
+ * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	return ipa3_add_rt_rule_usr(rules, false);
+}
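+
+/*
+ * Example (illustrative only): adding a single v4 routing rule to a table
+ * named "wan_rt" and committing to HW in the same call. The element type
+ * struct ipa_rt_rule_add and the table name are assumptions of this
+ * sketch.
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_add),
+ *		GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	strlcpy(req->rt_tbl_name, "wan_rt", IPA_RESOURCE_NAME_MAX);
+ *	req->rules[0].at_rear = 1;
+ *	req->rules[0].rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+ *	if (ipa3_add_rt_rule(req) || req->rules[0].status)
+ *		IPAERR("add rt rule failed\n");
+ *	else
+ *		IPADBG("rt rule hdl 0x%x\n", req->rules[0].rt_rule_hdl);
+ *	kfree(req);
+ */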
+
+/**
+ * ipa3_add_rt_rule_v2() - Add the specified routing rules to SW
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
+{
+	return ipa3_add_rt_rule_usr_v2(rules, false);
+}
+
+/**
+ * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:		[inout] set of routing rules to add
+ * @user_only:	[in] indicate installed by userspace module
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
+{
+	int i;
+	int ret;
+	struct ipa_rt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(rules->rules[i].rule, &rule);
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl,
+					0,
+					user_only)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_usr_v2() - Add the specified routing rules
+ * to SW and optionally commit to IPA HW
+ * @rules:		[inout] set of routing rules to add
+ * @user_only:	[in] indicate installed by userspace module
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_usr_v2(struct ipa_ioc_add_rt_rule_v2 *rules,
+	bool user_only)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rule),
+					((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rt_rule_hdl),
+					0,
+					user_only)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			((struct ipa_rt_rule_add_i *)rules->rules)[i].status
+				= IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+	int i;
+	int ret;
+	struct ipa_rt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(
+				rules->rules[i].rule, &rule);
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl,
+					rules->rules[i].rule_id, true)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext_v2() - Add the specified routing rules
+ * to SW with rule id and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext_v2(struct ipa_ioc_add_rt_rule_ext_v2 *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].rule.hashable = false;
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&(((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rule),
+					((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].at_rear,
+					&(((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rt_rule_hdl),
+					((struct ipa_rt_rule_add_ext_i *)
+					rules->rules)[i].rule_id, true)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_rt_rule_add_ext_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_after() - Add the given routing rules after the
+ * specified rule to SW and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add + handle where to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
+{
+	int i;
+	int ret = 0;
+	struct ipa3_rt_tbl *tbl = NULL;
+	struct ipa3_rt_entry *entry = NULL;
+	struct ipa_rt_rule_i rule;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+		IPAERR_RL("failed finding rt tbl name = %s\n",
+			rules->rt_tbl_name);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (!tbl->rule_cnt) {
+		IPAERR_RL("tbl->rule_cnt == 0\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (!entry) {
+		IPAERR_RL("failed finding rule %d in rt tbls\n",
+			rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n",
+			entry->cookie, rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given rt rule does not match the table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * do not allow any rule to be added to the "default" routing
+	 * table
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+		(tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * we add all rules one after the other; if one insertion fails, it
+	 * cuts the chain (all following rules will receive a fail status),
+	 * as subsequent calls to __ipa_add_rt_rule_after will fail
+	 * (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			rules->rules[i].rule.hashable = false;
+		__ipa_convert_rt_rule_in(
+				rules->rules[i].rule, &rule);
+		if (__ipa_add_rt_rule_after(tbl,
+					&rule,
+					&rules->rules[i].rt_rule_hdl,
+					&entry)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+			__ipa_convert_rt_rule_out(rule, &rules->rules[i].rule);
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			IPAERR_RL("failed to commit\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+	goto bail;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_after_v2() - Add the given routing rules
+ * after the specified rule to SW and optionally commit to IPA
+ * HW
+ * @rules:	[inout] set of routing rules to add + handle where to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after_v2(struct ipa_ioc_add_rt_rule_after_v2
+	*rules)
+{
+	int i;
+	int ret = 0;
+	struct ipa3_rt_tbl *tbl = NULL;
+	struct ipa3_rt_entry *entry = NULL;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+		IPAERR_RL("failed finding rt tbl name = %s\n",
+			rules->rt_tbl_name);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (!tbl->rule_cnt) {
+		IPAERR_RL("tbl->rule_cnt == 0\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (!entry) {
+		IPAERR_RL("failed finding rule %d in rt tbls\n",
+			rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n",
+			entry->cookie, rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR_RL("given rt rule does not match the table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * do not allow any rule to be added to the "default" routing
+	 * table
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+		(tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * we add all rules one after the other; if one insertion fails, it
+	 * cuts the chain (all following rules will receive a fail status),
+	 * as subsequent calls to __ipa_add_rt_rule_after will fail
+	 * (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].rule.hashable = false;
+		if (__ipa_add_rt_rule_after(tbl,
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rule),
+					&(((struct ipa_rt_rule_add_i *)
+					rules->rules)[i].rt_rule_hdl),
+					&entry)) {
+			IPAERR_RL("failed to add rt rule %d\n", i);
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			((struct ipa_rt_rule_add_i *)
+			rules->rules)[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			IPAERR_RL("failed to commit\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+	goto bail;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+int __ipa3_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa3_rt_entry *entry;
+	int id;
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
+
+	entry = ipa3_id_find(rule_hdl);
+
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("bad params\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
+		IPADBG("Deleting rule from default rt table idx=%u\n",
+			entry->tbl->idx);
+		if (entry->tbl->rule_cnt == 1) {
+			IPAERR_RL("Default tbl last rule cannot be deleted\n");
+			return -EINVAL;
+		}
+	}
+
+	/* check whether the header entry is still present
+	 * in the header table
+	 */
+
+	if (entry->hdr) {
+		hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+			IPAERR_RL("Header entry already deleted\n");
+			return -EINVAL;
+		}
+	} else if (entry->proc_ctx) {
+		hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+		if (!hdr_proc_entry ||
+			hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+			IPAERR_RL("Proc header entry already deleted\n");
+			return -EINVAL;
+		}
+	}
+
+	if (entry->hdr)
+		__ipa3_release_hdr(entry->hdr->id);
+	else if (entry->proc_ctx &&
+		(!ipa3_check_idr_if_freed(entry->proc_ctx)))
+		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d ref_cnt=%u\n",
+		entry->tbl->idx, entry->tbl->rule_cnt,
+		entry->rule_id, entry->tbl->ref_cnt);
+	/* if rule id was allocated from idr, remove it */
+	if (!entry->rule_id_valid)
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR_RL("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	id = entry->id;
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+/**
+ * ipa3_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR_RL("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
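+
+/*
+ * Example (illustrative only): deleting a rule by the handle returned
+ * from the add path and committing to HW. The element type
+ * struct ipa_rt_rule_del is an assumption of this sketch; rule_hdl is
+ * the handle obtained from ipa3_add_rt_rule().
+ *
+ *	struct ipa_ioc_del_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_del),
+ *		GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_hdls = 1;
+ *	req->hdl[0].hdl = rule_hdl;
+ *	if (ipa3_del_rt_rule(req) || req->hdl[0].status)
+ *		IPAERR("del rt rule failed\n");
+ *	kfree(req);
+ */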
+
+/**
+ * ipa3_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:			[in] The family of routing tables
+ * @user_only:	[in] indicate delete rules installed by userspace
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *tbl_next;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_entry *rule;
+	struct ipa3_rt_entry *rule_next;
+	struct ipa3_rt_tbl_set *rset;
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
+	u32 apps_start_idx;
+	int id;
+	bool tbl_user = false;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4)
+		apps_start_idx =
+			IPA_MEM_PART(v4_apps_rt_index_lo);
+	else
+		apps_start_idx =
+			IPA_MEM_PART(v6_apps_rt_index_lo);
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_reset_flt(ip, user_only))
+		IPAERR_RL("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		tbl_user = false;
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			if (ipa3_id_find(rule->id) == NULL) {
+				WARN_ON_RATELIMIT_IPA(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+
+			/* indicate if tbl used for user-specified rules */
+			if (rule->ipacm_installed) {
+				IPADBG("tbl_user %d, tbl-index %d\n",
+				tbl_user, tbl->id);
+				tbl_user = true;
+			}
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 * last rule
+			 */
+			if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+				continue;
+
+			if (!user_only ||
+				rule->ipacm_installed) {
+				list_del(&rule->link);
+				if (rule->hdr) {
+					hdr_entry = ipa3_id_find(
+							rule->rule.hdr_hdl);
+					if (!hdr_entry ||
+					hdr_entry->cookie != IPA_HDR_COOKIE) {
+						mutex_unlock(&ipa3_ctx->lock);
+						IPAERR_RL(
+						"Header already deleted\n");
+						return -EINVAL;
+					}
+				} else if (rule->proc_ctx) {
+					hdr_proc_entry =
+						ipa3_id_find(
+						rule->rule.hdr_proc_ctx_hdl);
+					if (!hdr_proc_entry ||
+						hdr_proc_entry->cookie !=
+							IPA_PROC_HDR_COOKIE) {
+						mutex_unlock(&ipa3_ctx->lock);
+						IPAERR_RL(
+						"Proc entry already deleted\n");
+						return -EINVAL;
+					}
+				}
+				tbl->rule_cnt--;
+				if (rule->hdr)
+					__ipa3_release_hdr(rule->hdr->id);
+				else if (rule->proc_ctx &&
+					(!ipa3_check_idr_if_freed(
+						rule->proc_ctx)))
+					__ipa3_release_hdr_proc_ctx(
+						rule->proc_ctx->id);
+				rule->cookie = 0;
+				if (!rule->rule_id_valid)
+					idr_remove(tbl->rule_ids,
+						rule->rule_id);
+				id = rule->id;
+				kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
+
+				/* remove the handle from the database */
+				ipa3_id_remove(id);
+			}
+		}
+
+		if (ipa3_id_find(tbl->id) == NULL) {
+			WARN_ON_RATELIMIT_IPA(1);
+			mutex_unlock(&ipa3_ctx->lock);
+			return -EFAULT;
+		}
+		id = tbl->id;
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != apps_start_idx) {
+			if (!user_only || tbl_user) {
+				tbl->rule_ids = NULL;
+				if (tbl->in_sys[IPA_RULE_HASHABLE] ||
+					tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
+					list_move(&tbl->link,
+						&rset->head_rt_tbl_list);
+					clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+					set->tbl_cnt--;
+					IPADBG("rst tbl_idx=%d cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				} else {
+					list_del(&tbl->link);
+					set->tbl_cnt--;
+					clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+					IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+					kmem_cache_free(ipa3_ctx->rt_tbl_cache,
+						tbl);
+				}
+				/* remove the handle from the database */
+				ipa3_id_remove(id);
+			}
+		}
+	}
+
+	/* commit the change to IPA-HW */
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v4) ||
+		ipa3_ctx->ctrl->ipa3_commit_rt(IPA_IP_v6)) {
+		IPAERR("fail to commit rt-rule\n");
+		WARN_ON_RATELIMIT_IPA(1);
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EPERM;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_get_rt_tbl() - lookup the specified routing table and return handle if
+ * it exists, if lookup succeeds the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa3_put_rt_tbl later if this function succeeds
+ */
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa3_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
+		if (entry->ref_cnt == U32_MAX) {
+			IPAERR_RL("fail: ref count crossed limit\n");
+			goto ret;
+		}
+		entry->ref_cnt++;
+		lookup->hdl = entry->id;
+
+		/* commit for get */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
+			IPAERR_RL("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+
+ret:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa3_rt_tbl *entry;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	int result = 0;
+
+	mutex_lock(&ipa3_ctx->lock);
+	entry = ipa3_id_find(rt_tbl_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+		IPAERR_RL("bad params\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else {
+		WARN_ON_RATELIMIT_IPA(1);
+		result = -EINVAL;
+		goto ret;
+	}
+
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
+			entry->idx);
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR_RL("fail to del RT tbl\n");
+		/* commit for put */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
+			IPAERR_RL("fail to commit RT tbl\n");
+	}
+
+	result = 0;
+
+ret:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
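+
+/*
+ * Example (illustrative only): the get/put pair above is reference
+ * counted. A client that needs a table handle (e.g. to point a filter
+ * rule at the table) takes a reference with ipa3_get_rt_tbl() and must
+ * release it with ipa3_put_rt_tbl(), otherwise the table is never
+ * reaped. The table name and do_something_with() are placeholders.
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	lookup.ip = IPA_IP_v6;
+ *	strlcpy(lookup.name, "my_rt_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_get_rt_tbl(&lookup)) {
+ *		do_something_with(lookup.hdl);
+ *		ipa3_put_rt_tbl(lookup.hdl);
+ *	}
+ */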
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
+
+	if (rtrule->rule.hdr_hdl) {
+		hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
+		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+			IPAERR_RL("rt rule does not point to valid hdr\n");
+			goto error;
+		}
+	} else if (rtrule->rule.hdr_proc_ctx_hdl) {
+		proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
+		if ((proc_ctx == NULL) ||
+			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+			IPAERR_RL("rt rule does not point to valid proc ctx\n");
+			goto error;
+		}
+	}
+
+	entry = ipa3_id_find(rtrule->rt_rule_hdl);
+	if (entry == NULL) {
+		IPAERR_RL("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("bad params\n");
+		goto error;
+	}
+
+	if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
+		IPAERR_RL("Default tbl rule cannot be modified\n");
+		return -EINVAL;
+	}
+	/* check whether the header entry is still present
+	 * in the header table
+	 */
+
+	if (entry->hdr) {
+		hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+			IPAERR_RL("Header entry already deleted\n");
+			return -EPERM;
+		}
+	} else if (entry->proc_ctx) {
+		hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+		if (!hdr_proc_entry ||
+			hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+			IPAERR_RL("Proc header entry already deleted\n");
+			return -EPERM;
+		}
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+
+	entry->rule = rtrule->rule;
+	entry->hdr = hdr;
+	entry->proc_ctx = proc_ctx;
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+
+	entry->hw_len = 0;
+	entry->prio = 0;
+	if (rtrule->rule.enable_stats)
+		entry->cnt_idx = rtrule->rule.cnt_idx;
+	else
+		entry->cnt_idx = 0;
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of routing rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int i;
+	int result;
+	struct ipa_rt_rule_mdfy_i rule;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			hdls->rules[i].rule.hashable = false;
+		__ipa_convert_rt_mdfy_in(hdls->rules[i], &rule);
+		if (__ipa_mdfy_rt_rule(&rule)) {
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
+			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+			__ipa_convert_rt_mdfy_out(rule, &hdls->rules[i]);
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_rt_rule_v2() - Modify the specified routing rules
+ * in SW and optionally commit to IPA HW
+ * @hdls:	[inout] set of routing rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule_v2(struct ipa_ioc_mdfy_rt_rule_v2 *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR_RL("bad param\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		/* if hashing not supported, all tables are non-hash tables */
+		if (ipa3_ctx->ipa_fltrt_not_hashable)
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].rule.hashable = false;
+		if (__ipa_mdfy_rt_rule(&(((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i]))) {
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			((struct ipa_rt_rule_mdfy_i *)
+			hdls->rules)[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl
+ *  table index must be for an AP EP (not modem);
+ *  updates the routing masking values without changing the flt ones.
+ *
+ * @tbl_idx: routing table index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR_RL("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >=
+		max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index)) ||
+		tbl_idx < 0) {
+		IPAERR_RL("bad table index\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) {
+		IPAERR_RL("cannot configure modem v4 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) {
+		IPAERR_RL("cannot configure modem v6 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+	fltrt_tuple.rt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+
+	return 0;
+}
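+
+/*
+ * Example (illustrative only): clearing the hash tuple of an AP routing
+ * table. A zeroed struct ipahal_reg_hash_tuple is assumed here to mask
+ * out every tuple member, effectively disabling hash-based matching for
+ * that table; tbl_idx must pass the AP-ownership checks above.
+ *
+ *	struct ipahal_reg_hash_tuple tuple;
+ *
+ *	memset(&tuple, 0, sizeof(tuple));
+ *	if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple))
+ *		IPAERR("fail to clear rt tuple mask\n");
+ */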
+
+/**
+ * ipa3_rt_read_tbl_from_hw() -Read routing table from IPA HW
+ * @tbl_idx: routing table index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in the entry array. Set by the caller to
+ *  the array size; on return, holds the number of entries actually read
+ *
+ * This function reads the routing table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * For an empty table or a modem table in system memory, zero entries are
+ * returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	u8 *rule_addr;
+	int rule_idx;
+
+	IPADBG_LOW("tbl_idx=%d ip_t=%d hash=%d entry=0x%pK num_entry=0x%pK\n",
+		tbl_idx, ip_type, hashable, entry, num_entry);
+
+	/*
+	 * no SRAM memory is allocated for hash tables, so reading hash
+	 * table rules is not supported
+	 */
+	if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) {
+		IPADBG("Reading hashable rules not supported\n");
+		*num_entry = 0;
+		return 0;
+	}
+
+	if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
+		IPAERR_RL("Invalid params\n");
+		return -EFAULT;
+	}
+
+	if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) {
+		IPAERR_RL("Invalid params\n");
+		return -EFAULT;
+	}
+
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_nhash_ofst);
+	}
+
+	IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+		tbl_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid rt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables which reside in DDR access it from the virtual memory */
+	if (is_sys) {
+		struct ipa3_rt_tbl_set *set;
+		struct ipa3_rt_tbl *tbl;
+
+		set = &ipa3_ctx->rt_tbl_set[ip_type];
+		rule_addr = NULL;
+		list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+			if (tbl->idx == tbl_idx) {
+				sys_tbl_mem = &(tbl->curr_mem[hashable ?
+					IPA_RULE_HASHABLE :
+					IPA_RULE_NON_HASHABLE]);
+				if (sys_tbl_mem->phys_base &&
+					sys_tbl_mem->phys_base != tbl_addr) {
+					IPAERR("mismatch:parsed=%llx sw=%pad\n"
+						, tbl_addr,
+						&sys_tbl_mem->phys_base);
+				}
+				if (sys_tbl_mem->phys_base)
+					rule_addr = sys_tbl_mem->base;
+				else
+					rule_addr = NULL;
+			}
+		}
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG_LOW("First rule addr 0x%pK\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing rt rule\n");
+			goto bail;
+		}
+
+		IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
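+
+/*
+ * Example (illustrative only): dumping the first rules of the v4
+ * non-hashable table at index 0, the way a debug reader might. The
+ * array size of 10 is arbitrary for this sketch.
+ *
+ *	struct ipahal_rt_rule_entry entries[10];
+ *	int num = ARRAY_SIZE(entries);
+ *	int i;
+ *
+ *	if (!ipa3_rt_read_tbl_from_hw(0, IPA_IP_v4, false, entries, &num))
+ *		for (i = 0; i < num; i++)
+ *			IPADBG("rule %d size %d\n", i,
+ *				entries[i].rule_size);
+ */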

+ 183 - 0
ipa/ipa_v3/ipa_trace.h

@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	intr_to_poll3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	poll_to_intr3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_enter3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_exit3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifni3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifrx3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netif_rcv_skb3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	ipa3_rx_poll_num,
+
+	TP_PROTO(int poll_num),
+
+	TP_ARGS(poll_num),
+
+	TP_STRUCT__entry(
+		__field(int,	poll_num)
+	),
+
+	TP_fast_assign(
+		__entry->poll_num = poll_num;
+	),
+
+	TP_printk("each_poll_aggr_pkt_num=%d", __entry->poll_num)
+);
+
+TRACE_EVENT(
+	ipa3_rx_poll_cnt,
+
+	TP_PROTO(int poll_num),
+
+	TP_ARGS(poll_num),
+
+	TP_STRUCT__entry(
+		__field(int,	poll_num)
+	),
+
+	TP_fast_assign(
+		__entry->poll_num = poll_num;
+	),
+
+	TP_printk("napi_overall_poll_pkt_cnt=%d", __entry->poll_num)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/platform/msm/ipa/ipa_v3
+#include <trace/define_trace.h>
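+
+/*
+ * Usage note (illustrative): each TRACE_EVENT() above generates a
+ * trace_<name>() inline, e.g. the data path can emit
+ * trace_ipa3_rx_poll_num(cnt) once per poll cycle. With tracing enabled,
+ * the events appear under /sys/kernel/debug/tracing/events/ipa/.
+ */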

+ 1589 - 0
ipa/ipa_v3/ipa_uc.c

@@ -0,0 +1,1589 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION     0x2000
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+
+#define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
+#define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
+#define IPA_UC_EVENT_RING_SIZE 10
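+
+/*
+ * Worked example (illustrative): the uC packs the protocol id and the
+ * stats offset into one 32-bit response word. For a value of, say,
+ * 0x03001234, IPA_UC_DBG_STATS_GET_PROT_ID() yields protocol 0x03
+ * (bits 31:24) and IPA_UC_DBG_STATS_GET_OFFSET() yields offset 0x001234
+ * (bits 23:0).
+ */
+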
+/**
+ * Mailbox register used to interrupt the HWP for a CPU command.
+ * The IPA_UC_MAILBOX_m_n doorbell is used instead of the IPA_IRQ_EE_UC_0
+ * interrupt due to a HW limitation.
+ */
+#define IPA_CPU_2_HW_CMD_MBOX_m          0
+#define IPA_CPU_2_HW_CMD_MBOX_n         23
+
+#define IPA_UC_ERING_m 0
+#define IPA_UC_ERING_n_r 1
+#define IPA_UC_ERING_n_w 0
+#define IPA_UC_MON_INTERVAL 5
+
+/**
+ * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
+ *                                 of HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ *                              handling.
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state.
+ * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state.
+ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * IPA_CPU_2_HW_CMD_REG_WRITE : CPU instructs HW to write a value to a register.
+ * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
+ * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING:  Command to setup the event ring
+ * IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR: Command to enable pipe monitoring.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR: Command to update pipes to monitor.
+ * IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR: Command to disable pipe
+ *					monitoring, no parameter required.
+ */
+enum ipa3_cpu_2_hw_commands {
+	IPA_CPU_2_HW_CMD_NO_OP                     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_CPU_2_HW_CMD_UPDATE_FLAGS              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_CPU_2_HW_CMD_DEBUG_GET_INFO            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_CPU_2_HW_CMD_ERR_FATAL                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_CPU_2_HW_CMD_CLK_GATE                  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_CPU_2_HW_CMD_CLK_UNGATE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_CPU_2_HW_CMD_MEMCPY                    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_CPU_2_HW_CMD_RESET_PIPE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_CPU_2_HW_CMD_REG_WRITE                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+	IPA_CPU_2_HW_CMD_GSI_CH_EMPTY              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
+	IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO           =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
+	IPA_CPU_2_HW_CMD_SETUP_EVENT_RING          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 12),
+	IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 13),
+	IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 14),
+	IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 15),
+
+};
+
+/**
+ * enum ipa3_hw_2_cpu_responses -  Values that represent common HW responses
+ *  to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ *  boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to
+ *  IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command
+ */
+enum ipa3_hw_2_cpu_responses {
+	IPA_HW_2_CPU_RESPONSE_NO_OP          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+};
+
+/**
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t  {
+	u32 destination_addr;
+	u32 source_addr;
+	u32 dest_buffer_size;
+	u32 source_buffer_size;
+};
+
+/**
+ * struct IpaHwRegWriteCmdData_t - holds the parameters for
+ * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
+ * sent as 64b immediate parameters.
+ * @RegisterAddress: RG10 register address where the value needs to be written
+ * @RegisterValue: 32-Bit value to be written into the register
+ */
+struct IpaHwRegWriteCmdData_t {
+	u32 RegisterAddress;
+	u32 RegisterValue;
+};
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @responseData : 16b responseData
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+	struct IpaHwCpuCmdCompletedResponseParams_t {
+		u32 originalCmdOp:8;
+		u32 status:8;
+		u32 responseData:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
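+
+/*
+ * Example (illustrative only): decoding a command-completed response word
+ * read from uC shared memory. The responseParams field name is an
+ * assumption of this sketch (only responseParams_1 appears below).
+ *
+ *	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+ *
+ *	uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
+ *	IPADBG("cmd 0x%x completed, status %u\n",
+ *		uc_rsp.params.originalCmdOp, uc_rsp.params.status);
+ */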
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags defined the behavior of HW.
+ *	This field is expected to be used as bitmask for enum ipa3_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+	struct IpaHwUpdateFlagsCmdParams_t {
+		u32 newFlags;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * union IpaHwChkChEmptyCmdData_t -  Structure holding the parameters for
+ *  IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b
+ *  immediate parameters.
+ * @ee_n : EE owner of the channel
+ * @vir_ch_id : GSI virtual channel ID of the channel to be checked for
+ *  emptiness
+ * @reserved_02_04 : Reserved
+ */
+union IpaHwChkChEmptyCmdData_t {
+	struct IpaHwChkChEmptyCmdParams_t {
+		u8 ee_n;
+		u8 vir_ch_id;
+		u16 reserved_02_04;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+struct IpaSetupEventRingCmdParams_t {
+	u32 ring_base_pa;
+	u32 ring_base_pa_hi;
+	u32 ring_size; /* size = 10 */
+} __packed;
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING command. Parameters are
+ * sent as 32b immediate parameters.
+ */
+union IpaSetupEventRingCmdData_t {
+	struct IpaSetupEventRingCmdParams_t event;
+	u32 raw32b[6]; /* uc-internal */
+} __packed;
+
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO
+ * command.
+ * @remoteIPAAddr: 5G IPA address : uC proxies Q6 doorbell to this address
+ * @mboxN: mbox on which Q6 will interrupt uC
+ */
+struct IpaHwDbAddrInfo_t {
+	u32 remoteIPAAddr;
+	uint32_t mboxN;
+} __packed;
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_ENABLE_PIPE_MONITOR
+ * command.
+ * @ipaProdGsiChid: IPA prod GSI chid to monitor
+ * @redMarkerThreshold: red marker threshold in elements for the GSI channel
+ */
+union IpaEnablePipeMonitorCmdData_t {
+	struct IpaEnablePipeMonitorCmdParams_t {
+		u32 ipaProdGsiChid:16;
+		u32 redMarkerThreshold:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_UPDATE_PIPE_MONITOR
+ * command.
+ *
+ * @bitmask: bitmask of channels/pipes to add to or delete from the global
+ *           monitoring pipemask (IPA pipe# bitmask or GSI chid bitmask)
+ * @add_delete: 1: add pipes to monitor
+ *              0: delete pipes from monitor
+ */
+struct IpaUpdateFlowCtlMonitorData_t {
+	u32 bitmask;
+	u8 add_delete;
+};
+
+static DEFINE_MUTEX(uc_loaded_nb_lock);
+static BLOCKING_NOTIFIER_HEAD(uc_loaded_notifier);
+
+struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
+{
+	const char *str;
+
+	switch (err_type) {
+	case IPA_HW_ERROR_NONE:
+		str = "IPA_HW_ERROR_NONE";
+		break;
+	case IPA_HW_INVALID_DOORBELL_ERROR:
+		str = "IPA_HW_INVALID_DOORBELL_ERROR";
+		break;
+	case IPA_HW_DMA_ERROR:
+		str = "IPA_HW_DMA_ERROR";
+		break;
+	case IPA_HW_FATAL_SYSTEM_ERROR:
+		str = "IPA_HW_FATAL_SYSTEM_ERROR";
+		break;
+	case IPA_HW_INVALID_OPCODE:
+		str = "IPA_HW_INVALID_OPCODE";
+		break;
+	case IPA_HW_INVALID_PARAMS:
+		str = "IPA_HW_INVALID_PARAMS";
+		break;
+	case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE:
+		str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE";
+		break;
+	default:
+		str = "INVALID ipa_hw_errors type";
+	}
+
+	return str;
+}
+
+static void ipa3_uc_save_dbg_stats(u32 size)
+{
+	u8 prot_id;
+	u32 addr_offset;
+	void __iomem *mmio;
+
+	prot_id = IPA_UC_DBG_STATS_GET_PROT_ID(
+		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
+	addr_offset = IPA_UC_DBG_STATS_GET_OFFSET(
+		ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
+	mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		addr_offset, sizeof(struct IpaHwRingStats_t) *
+		MAX_CH_STATS_SUPPORTED);
+	if (mmio == NULL) {
+		IPAERR("unexpected NULL mmio\n");
+		return;
+	}
+	switch (prot_id) {
+	case IPA_HW_PROTOCOL_AQC:
+		if (!ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	case IPA_HW_PROTOCOL_MHIP:
+		if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
+	case IPA_HW_PROTOCOL_USB:
+		if (!ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
+	default:
+		IPAERR("unknown protocols %d\n", prot_id);
+		goto unmap;
+	}
+	return;
+unmap:
+	iounmap(mmio);
+}
+
+static void ipa3_log_evt_hdlr(void)
+{
+	int i;
+
+	if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+		ipa3_ctx->uc_ctx.uc_event_top_ofst =
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
+			sizeof(struct IpaHwEventLogInfoData_t) >=
+			ipa3_ctx->ctrl->ipa_reg_base_ofst +
+			ipahal_get_reg_n_ofst(
+				IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
+			ipa3_ctx->smem_sz) {
+			IPAERR("uc_top 0x%x outside SRAM\n",
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+			goto bad_uc_top_ofst;
+		}
+
+		ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+			ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->uc_ctx.uc_event_top_ofst,
+			sizeof(struct IpaHwEventLogInfoData_t));
+		if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) {
+			IPAERR("fail to ioremap uc top\n");
+			goto bad_uc_top_ofst;
+		}
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+					(ipa3_ctx->uc_ctx.uc_event_top_mmio);
+		}
+	} else {
+
+		if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+			ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+			IPAERR("uc top ofst changed new=%u cur=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams,
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+		}
+	}
+
+	return;
+
+bad_uc_top_ofst:
+	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+static void ipa3_event_ring_hdlr(void)
+{
+	u32 ering_rp, offset;
+	void *rp_va;
+	struct ipa_inform_wlan_bw bw_info;
+	struct eventElement_t *e_b = NULL, *e_q = NULL;
+	int mul = 0;
+
+	ering_rp = ipahal_read_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r);
+	offset = sizeof(struct eventElement_t);
+	ipa3_ctx->uc_ctx.ering_rp = ering_rp;
+
+	while (ipa3_ctx->uc_ctx.ering_rp_local != ering_rp) {
+		rp_va = ipa3_ctx->uc_ctx.event_ring.base +
+			ipa3_ctx->uc_ctx.ering_rp_local;
+
+		if (((struct eventElement_t *) rp_va)->Opcode == BW_NOTIFY) {
+			e_b = ((struct eventElement_t *) rp_va);
+			IPADBG("prot(%d), index (%d) throughput (%lu)\n",
+			e_b->Protocol,
+			e_b->Value.bw_param.ThresholdIndex,
+			e_b->Value.bw_param.throughput);
+
+			memset(&bw_info, 0, sizeof(struct ipa_inform_wlan_bw));
+			bw_info.index =
+				e_b->Value.bw_param.ThresholdIndex;
+			mul = 1000 / IPA_UC_MON_INTERVAL;
+			bw_info.throughput =
+				e_b->Value.bw_param.throughput*mul;
+			if (ipa3_inform_wlan_bw(&bw_info))
+				IPAERR_RL("failed on index %d to wlan\n",
+				bw_info.index);
+		} else if (((struct eventElement_t *) rp_va)->Opcode
+			== QUOTA_NOTIFY) {
+			e_q = ((struct eventElement_t *) rp_va);
+			IPADBG("got quota-notify %d reach(%d) usage (%lu)\n",
+			e_q->Protocol,
+			e_q->Value.quota_param.ThreasholdReached,
+			e_q->Value.quota_param.usage);
+			if (ipa3_broadcast_wdi_quota_reach_ind(0,
+				e_q->Value.quota_param.usage))
+				IPAERR_RL("failed on quota_reach for %d\n",
+				e_q->Protocol);
+		}
+		ipa3_ctx->uc_ctx.ering_rp_local += offset;
+		ipa3_ctx->uc_ctx.ering_rp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		/* update wp */
+		ipa3_ctx->uc_ctx.ering_wp_local += offset;
+		ipa3_ctx->uc_ctx.ering_wp_local %=
+			ipa3_ctx->uc_ctx.event_ring.size;
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, IPA_UC_ERING_m,
+			IPA_UC_ERING_n_w, ipa3_ctx->uc_ctx.ering_wp_local);
+	}
+}
+
+/**
+ * ipa3_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa3_uc_state_check(void)
+{
+	if (!ipa3_ctx->uc_ctx.uc_inited) {
+		IPAERR("uC interface not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_failed) {
+		IPAERR("uC has failed its last command\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_loaded_check() - Check the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa3_uc_loaded_check(void)
+{
+	return ipa3_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa3_uc_loaded_check);
+
+/**
+ * ipa3_uc_register_ready_cb() - register a uC ready callback notifier block
+ * @nb: notifier
+ *
+ * Register a callback to be called when uC is ready to receive commands. uC is
+ * considered to be ready when it sends %IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED.
+ *
+ * Return: 0 on successful registration, negative errno otherwise
+ *
+ * See blocking_notifier_chain_register() for possible errno values
+ */
+int ipa3_uc_register_ready_cb(struct notifier_block *nb)
+{
+	int rc;
+
+	mutex_lock(&uc_loaded_nb_lock);
+
+	rc = blocking_notifier_chain_register(&uc_loaded_notifier, nb);
+	if (!rc && ipa3_ctx->uc_ctx.uc_loaded)
+		(void) nb->notifier_call(nb, false, ipa3_ctx);
+
+	mutex_unlock(&uc_loaded_nb_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(ipa3_uc_register_ready_cb);
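+
+/*
+ * Example usage (illustrative sketch; the callback and notifier block
+ * names are hypothetical, not part of this driver):
+ *
+ *	static int my_uc_ready_cb(struct notifier_block *nb,
+ *		unsigned long action, void *data)
+ *	{
+ *		pr_info("IPA uC is ready\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_uc_ready_nb = {
+ *		.notifier_call = my_uc_ready_cb,
+ *	};
+ *
+ *	ret = ipa3_uc_register_ready_cb(&my_uc_ready_nb);
+ */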
+
+/**
+ * ipa3_uc_unregister_ready_cb() - unregister a uC ready callback
+ * @nb: notifier
+ *
+ * Unregister a uC loaded notifier block that was previously registered by
+ * ipa3_uc_register_ready_cb().
+ *
+ * Return: 0 on successful unregistration, negative errno otherwise
+ *
+ * See blocking_notifier_chain_unregister() for possible errno values
+ */
+int ipa3_uc_unregister_ready_cb(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&uc_loaded_notifier, nb);
+}
+EXPORT_SYMBOL(ipa3_uc_unregister_ready_cb);
+
+static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
+				 void *private_data,
+				 void *interrupt_data)
+{
+	union IpaHwErrorEventData_t evt;
+	u8 feature;
+
+	WARN_ON(private_data != ipa3_ctx);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	IPADBG("uC evt opcode=%u\n",
+		ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	if (feature >= IPA_HW_FEATURE_MAX) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr)
+		ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr
+			(ipa3_ctx->uc_ctx.uc_sram_mmio);
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_ERROR) {
+		evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		IPAERR("uC Error, evt errorType = %s\n",
+			ipa_hw_error_str(evt.params.errorType));
+		ipa3_ctx->uc_ctx.uc_failed = true;
+		ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+		ipa3_ctx->uc_ctx.uc_error_timestamp =
+			ipahal_read_reg(IPA_TAG_TIMER);
+		/* Unexpected UC hardware state */
+		ipa_assert();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_LOG_INFO) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_log_evt_hdlr();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVNT_RING_NOTIFY) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_event_ring_hdlr();
+	} else {
+		IPADBG("unsupported uC evt opcode=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	int result = 0;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("this=%pK evt=%lu ptr=%pK\n", this, event, ptr);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		goto fail;
+
+	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+	if (ipa3_inc_client_enable_clks_no_block(&log_info))
+		goto fail;
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+		IPA_CPU_2_HW_CMD_ERR_FATAL;
+	ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp;
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+	ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	/* give uc enough time to save state */
+	udelay(IPA_PKT_FLUSH_TO_US);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("err_fatal issued\n");
+
+fail:
+	return NOTIFY_DONE;
+}
+
+static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	u8 feature;
+	int res;
+	int i;
+
+	WARN_ON(private_data != ipa3_ctx);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	IPADBG("uC rsp opcode=%u\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	if (feature >= IPA_HW_FEATURE_MAX) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) {
+		res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr(
+			ipa3_ctx->uc_ctx.uc_sram_mmio,
+			&ipa3_ctx->uc_ctx.uc_status);
+		if (res == 0) {
+			IPADBG("feature %d specific response handler\n",
+				feature);
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return;
+		}
+	}
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+
+		if (ipa3_ctx->uc_ctx.uc_loaded) {
+			IPADBG("uC resp op INIT_COMPLETED is unexpected\n");
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return;
+		}
+
+		mutex_lock(&uc_loaded_nb_lock);
+
+		ipa3_ctx->uc_ctx.uc_loaded = true;
+
+		(void) blocking_notifier_call_chain(&uc_loaded_notifier, true,
+			ipa3_ctx);
+
+		mutex_unlock(&uc_loaded_nb_lock);
+
+		IPADBG("IPA uC loaded\n");
+		/*
+		 * The proxy vote is held until uC is loaded to ensure that
+		 * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+		 */
+		ipa3_proxy_clk_unvote();
+
+		/*
+		 * To enable ipa power collapse we need to enable rpmh and uc
+		 * handshake So that uc can do register retention. To enable
+		 * this handshake we need to send the below message to rpmh.
+		 */
+		ipa_pc_qmp_enable();
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+		}
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+		   IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+		uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
+		IPADBG("uC cmd response opcode=%u status=%u\n",
+		       uc_rsp.params.originalCmdOp,
+		       uc_rsp.params.status);
+		if (uc_rsp.params.originalCmdOp ==
+		    ipa3_ctx->uc_ctx.pending_cmd) {
+			ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+			if (uc_rsp.params.originalCmdOp ==
+				IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC)
+				ipa3_uc_save_dbg_stats(
+					uc_rsp.params.responseData);
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+		} else {
+			IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+			       ipa3_ctx->uc_ctx.pending_cmd,
+			       uc_rsp.params.originalCmdOp);
+		}
+	} else {
+		IPAERR("Unsupported uC rsp opcode = %u\n",
+		       ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_uc_wigig_misc_int_handler(enum ipa_irq_type interrupt,
+	void *private_data,
+	void *interrupt_data)
+{
+	IPADBG("\n");
+
+	WARN_ON(private_data != ipa3_ctx);
+
+	if (ipa3_ctx->uc_wigig_ctx.misc_notify_cb)
+		ipa3_ctx->uc_wigig_ctx.misc_notify_cb(
+			ipa3_ctx->uc_wigig_ctx.priv);
+
+	IPADBG("exit\n");
+}
+
+static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
+	u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
+{
+	int index;
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	int retries = 0;
+	u32 uc_error_type;
+
+send_cmd_lock:
+	mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC send command aborted\n");
+		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+		return -EBADF;
+	}
+send_cmd:
+	init_completion(&ipa3_ctx->uc_ctx.uc_completion);
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+	ipa3_ctx->uc_ctx.pending_cmd = opcode;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+	ipa3_ctx->uc_ctx.uc_status = 0;
+
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	if (polling_mode) {
+		struct IpaHwSharedMemCommonMapping_t *uc_sram_ptr =
+			ipa3_ctx->uc_ctx.uc_sram_mmio;
+		for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+			if (uc_sram_ptr->responseOp ==
+			    IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+				uc_rsp.raw32b = uc_sram_ptr->responseParams;
+				if (uc_rsp.params.originalCmdOp ==
+					ipa3_ctx->uc_ctx.pending_cmd) {
+					ipa3_ctx->uc_ctx.uc_status =
+						uc_rsp.params.status;
+					break;
+				}
+			}
+			usleep_range(IPA_UC_POLL_SLEEP_USEC,
+				IPA_UC_POLL_SLEEP_USEC);
+		}
+
+		if (index == IPA_UC_POLL_MAX_RETRY) {
+			IPAERR("uC max polling retries reached\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				uc_error_type = ipa3_ctx->uc_ctx.uc_error_type;
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(uc_error_type));
+			}
+			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+			/* Unexpected UC hardware state */
+			ipa_assert();
+		}
+	} else {
+		if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion,
+			timeout_jiffies) == 0) {
+			IPAERR("uC timed out\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				uc_error_type = ipa3_ctx->uc_ctx.uc_error_type;
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(uc_error_type));
+			}
+			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+			/* Unexpected UC hardware state */
+			ipa_assert();
+		}
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_status != expected_status) {
+		if (ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_CONS_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_PROD_STOP_FAILURE) {
+			retries++;
+			if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+				/* Unexpected UC hardware state */
+				ipa_assert();
+			}
+			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+			if (ipa3_ctx->uc_ctx.uc_status ==
+			    IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE)
+				ipa3_inject_dma_task_for_gsi();
+			/* sleep for short period to flush IPA */
+			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+			goto send_cmd_lock;
+		}
+
+		if (ipa3_ctx->uc_ctx.uc_status ==
+			IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) {
+			retries++;
+			if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+				return -EFAULT;
+			}
+			usleep_range(
+			IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC);
+			goto send_cmd;
+		}
+
+		IPAERR("Received status %u, Expected status %u\n",
+			ipa3_ctx->uc_ctx.uc_status, expected_status);
+		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+
+	IPADBG("uC cmd %u send succeeded\n", opcode);
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa3_uc_interface_init(void)
+{
+	int result;
+	unsigned long phys_addr;
+
+	if (ipa3_ctx->uc_ctx.uc_inited) {
+		IPADBG("uC interface already initialized\n");
+		return 0;
+	}
+
+	mutex_init(&ipa3_ctx->uc_ctx.uc_lock);
+	spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock);
+
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0);
+	ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+					       IPA_RAM_UC_SMEM_SIZE);
+	if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
+		IPAERR("Fail to ioremap IPA uC SRAM\n");
+		result = -ENOMEM;
+		goto remap_fail;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+		ipa3_uc_event_handler, true,
+		ipa3_ctx);
+	if (result) {
+		IPAERR("Fail to register for UC_IRQ0 event interrupt\n");
+		result = -EFAULT;
+		goto irq_fail0;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
+		ipa3_uc_response_hdlr, true,
+		ipa3_ctx);
+	if (result) {
+		IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+		result = -EFAULT;
+		goto irq_fail1;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_2,
+		ipa3_uc_wigig_misc_int_handler, true,
+		ipa3_ctx);
+	if (result) {
+		IPAERR("fail to register for UC_IRQ2 wigig misc interrupt\n");
+		result = -EFAULT;
+		goto irq_fail2;
+	}
+
+	ipa3_ctx->uc_ctx.uc_inited = true;
+
+	IPADBG("IPA uC interface is initialized\n");
+	return 0;
+irq_fail2:
+	ipa3_remove_interrupt_handler(IPA_UC_IRQ_1);
+irq_fail1:
+	ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+	iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+	return result;
+}
+
+/**
+ * ipa3_uc_send_cmd() - Send a command to the uC
+ *
+ * Note1: This function sends a command with a 32-bit parameter and does
+ *	not use the higher 32 bits of the command parameter (set to zero).
+ *
+ * Note2: In case the operation times out (no response from the uC) or
+ *       the maximal number of polling retries is reached, the logic
+ *       considers it an invalid state of the uC/IPA and issues a
+ *       kernel panic.
+ *
+ * Returns: 0 on success.
+ *          -EINVAL in case of invalid input.
+ *          -EBADF in case uC interface is not initialized /
+ *                 or the uC has failed previously.
+ *          -EFAULT in case the received status doesn't match
+ *                  the expected.
+ */
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies)
+{
+	return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode,
+		expected_status, polling_mode, timeout_jiffies);
+}
+
+/**
+ * ipa3_uc_register_handlers() - Registers event, response and log event
+ *                              handlers for a specific feature. Note that
+ *                              currently only one set of handlers can be
+ *                              registered per feature.
+ *
+ * Return value: None
+ */
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs)
+{
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Feature %u is invalid, not registering hdlrs\n",
+		       feature);
+		return;
+	}
+
+	mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);
+	ipa3_uc_hdlrs[feature] = *hdlrs;
+	mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
+
+	IPADBG("uC handlers registered for feature %u\n", feature);
+}
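+
+/*
+ * Example usage (illustrative sketch, modeled on the MHI feature code in
+ * ipa_uc_mhi.c; the handler names are hypothetical):
+ *
+ *	struct ipa3_uc_hdlrs hdlrs;
+ *
+ *	memset(&hdlrs, 0, sizeof(hdlrs));
+ *	hdlrs.ipa_uc_loaded_hdlr = my_feature_ready_cb;
+ *	hdlrs.ipa3_uc_response_hdlr = my_feature_response_hdlr;
+ *	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+ */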
+
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
+{
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	union IpaHwChkChEmptyCmdData_t cmd;
+	int ret;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+		       ipa_client);
+		return 0;
+	}
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n"
+			, ipa_client);
+		return 0;
+	}
+
+	cmd.params.ee_n = gsi_ep_info->ee;
+	cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	IPADBG("uC emptiness check for IPA GSI Channel %d\n",
+	       gsi_ep_info->ipa_gsi_chan_num);
+
+	ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
+
+/**
+ * ipa3_uc_notify_clk_state() - notify the uC of clock enable / disable
+ * @enabled: true if clocks are enabled
+ *
+ * The function uses the uC interface to notify the uC before IPA clocks
+ * are disabled, to make sure the uC is not in the middle of an operation.
+ * It is also used after clocks are enabled to notify the uC to resume
+ * processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_notify_clk_state(bool enabled)
+{
+	u32 opcode;
+
+	if (ipa3_ctx->ipa_hw_type > IPA_HW_v4_0) {
+		IPADBG_LOW("not supported past IPA v4.0\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * don't notify the uC on the enable/disable
+	 */
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC interface will not notify the UC on clock state\n");
+		return 0;
+	}
+
+	IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+	opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+			     IPA_CPU_2_HW_CMD_CLK_GATE;
+
+	return ipa3_uc_send_cmd(0, opcode, 0, true, 0);
+}
+
+/**
+ * ipa3_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_update_hw_flags(u32 flags)
+{
+	union IpaHwUpdateFlagsCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.newFlags = flags;
+	return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+		false, HZ);
+}
+
+/**
+ * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMemCopyData_t *cmd;
+
+	IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	cmd = (struct IpaHwMemCopyData_t *)mem.base;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->destination_addr = dest;
+	cmd->dest_buffer_size = len;
+	cmd->source_addr = src;
+	cmd->source_buffer_size = len;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+		true, 10 * HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto free_coherent;
+	}
+
+	res = 0;
+free_coherent:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return res;
+}
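+
+/*
+ * Example usage (illustrative sketch; dest_pa and src_pa are hypothetical
+ * physical addresses that must be accessible to the uC):
+ *
+ *	if (ipa3_uc_memcpy(dest_pa, src_pa, 64))
+ *		IPAERR("uC memcpy failed\n");
+ */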
+
+int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n)
+{
+	int res;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwDbAddrInfo_t *uc_info;
+
+	cmd.size = sizeof(*uc_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	uc_info = (struct IpaHwDbAddrInfo_t *) cmd.base;
+	uc_info->remoteIPAAddr = remote_addr;
+	uc_info->mboxN = mbox_n;
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO, 0,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR("fail to map 0x%x to mbox %d\n",
+			uc_info->remoteIPAAddr,
+			uc_info->mboxN);
+		goto free_coherent;
+	}
+
+	res = 0;
+free_coherent:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return res;
+}
+
+int ipa3_uc_debug_stats_alloc(
+	struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	struct IpaHwOffloadStatsAllocCmdData_t *cmd_data;
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		result = -ENOMEM;
+		return result;
+	}
+	cmd_data = (struct IpaHwOffloadStatsAllocCmdData_t *)cmd.base;
+	memcpy(cmd_data, &cmdinfo,
+		sizeof(struct IpaHwOffloadStatsAllocCmdData_t));
+	command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to alloc offload stats\n");
+		goto cleanup;
+	}
+	result = 0;
+cleanup:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("exit\n");
+	return result;
+}
+
+int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	struct IpaHwOffloadStatsDeAllocCmdData_t *cmd_data;
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		result = -ENOMEM;
+		return result;
+	}
+	cmd_data = (struct IpaHwOffloadStatsDeAllocCmdData_t *)
+		cmd.base;
+	cmd_data->protocol = prot_id;
+	command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to dealloc offload stats\n");
+		goto cleanup;
+	}
+	switch (prot_id) {
+	case IPA_HW_PROTOCOL_AQC:
+		iounmap(ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		iounmap(ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	default:
+		IPAERR("unknown protocols %d\n", prot_id);
+	}
+	result = 0;
+cleanup:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("exit\n");
+	return result;
+}
+
+int ipa3_uc_setup_event_ring(void)
+{
+	int res = 0;
+	struct ipa_mem_buffer cmd, *ring;
+	union IpaSetupEventRingCmdData_t *ring_info;
+
+	ring = &ipa3_ctx->uc_ctx.event_ring;
+	/* Allocate event ring */
+	ring->size = sizeof(struct eventElement_t) * IPA_UC_EVENT_RING_SIZE;
+	ring->base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring->size,
+		&ring->phys_base, GFP_KERNEL);
+	if (ring->base == NULL)
+		return -ENOMEM;
+
+	cmd.size = sizeof(*ring_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		dma_free_coherent(ipa3_ctx->uc_pdev, ring->size,
+			ring->base, ring->phys_base);
+		return -ENOMEM;
+	}
+
+	ring_info = (union IpaSetupEventRingCmdData_t *) cmd.base;
+	ring_info->event.ring_base_pa = (u32) (ring->phys_base & 0xFFFFFFFF);
+	ring_info->event.ring_base_pa_hi =
+		(u32) ((ring->phys_base & 0xFFFFFFFF00000000) >> 32);
+	ring_info->event.ring_size = IPA_UC_EVENT_RING_SIZE;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_SETUP_EVENT_RING, 0,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to setup event ring 0x%x 0x%x, size %d\n",
+			ring_info->event.ring_base_pa,
+			ring_info->event.ring_base_pa_hi,
+			ring_info->event.ring_size);
+		goto free_cmd;
+	}
+
+	ipa3_ctx->uc_ctx.uc_event_ring_valid = true;
+	/* write wp/rp values */
+	ipa3_ctx->uc_ctx.ering_rp_local = 0;
+	ipa3_ctx->uc_ctx.ering_wp_local =
+		ring->size - sizeof(struct eventElement_t);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_r, 0);
+	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+		IPA_UC_ERING_m, IPA_UC_ERING_n_w,
+			ipa3_ctx->uc_ctx.ering_wp_local);
+	ipa3_ctx->uc_ctx.ering_wp =
+		ipa3_ctx->uc_ctx.ering_wp_local;
+	ipa3_ctx->uc_ctx.ering_rp = 0;
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_quota_monitor(uint64_t quota)
+{
+	int ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaQuotaMonitoring_t *quota_info;
+
+	cmd.size = sizeof(*quota_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	quota_info = (struct IpaQuotaMonitoring_t *)cmd.base;
+	quota_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	quota_info->params.WdiQM.Quota = quota;
+	quota_info->params.WdiQM.info.Num = 4;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	quota_info->params.WdiQM.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	quota_info->params.WdiQM.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	quota_info->params.WdiQM.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	quota_info->params.WdiQM.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+		sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	quota_info->params.WdiQM.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_QUOTA_MONITORING,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to set quota %d, number offset %d\n",
+			quota_info->params.WdiQM.Quota,
+			quota_info->params.WdiQM.info.Num);
+		goto free_cmd;
+	}
+
+	IPADBG(" offest1 %d offest2 %d offest3 %d offest4 %d\n",
+			quota_info->params.WdiQM.info.Offset[0],
+			quota_info->params.WdiQM.info.Offset[1],
+			quota_info->params.WdiQM.info.Offset[2],
+			quota_info->params.WdiQM.info.Offset[3]);
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return res;
+}
+
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	int i, ind, res = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaBwMonitoring_t *bw_info;
+
+	if (!info)
+		return -EINVAL;
+
+	/* check max entry */
+	if (info->num > BW_MONITORING_MAX_THRESHOLD) {
+		IPAERR("%d, support max %d bw monitor\n", info->num,
+		BW_MONITORING_MAX_THRESHOLD);
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*bw_info);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL)
+		return -ENOMEM;
+
+	bw_info = (struct IpaBwMonitoring_t *)cmd.base;
+	bw_info->protocol = IPA_HW_PROTOCOL_WDI3;
+	bw_info->params.WdiBw.NumThresh = info->num;
+	bw_info->params.WdiBw.Stop = info->stop;
+	IPADBG("stop bw-monitor? %d\n", bw_info->params.WdiBw.Stop);
+
+	for (i = 0; i < info->num; i++) {
+		bw_info->params.WdiBw.BwThreshold[i] = info->threshold[i];
+		IPADBG("%d-st, %lu\n", i, bw_info->params.WdiBw.BwThreshold[i]);
+	}
+
+	bw_info->params.WdiBw.info.Num = 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[0] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_HW - 1;
+	bw_info->params.WdiBw.info.Offset[1] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		DL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[2] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.hw_counter_offset +
+		UL_ALL - 1;
+	bw_info->params.WdiBw.info.Offset[3] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[4] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_HW_CACHE - 1;
+	bw_info->params.WdiBw.info.Offset[5] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		UL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[6] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	ind = ipa3_ctx->fnr_info.sw_counter_offset +
+		DL_WLAN_TX - 1;
+	bw_info->params.WdiBw.info.Offset[7] =
+		IPA_MEM_PART(stats_fnr_ofst) +
+			sizeof(struct ipa_flt_rt_stats) * ind + 8;
+	bw_info->params.WdiBw.info.Interval =
+		IPA_UC_MON_INTERVAL;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		IPA_CPU_2_HW_CMD_BW_MONITORING,
+			IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+			false, 10 * HZ);
+
+	if (res) {
+		IPAERR(" faile to set bw %d level with %d coutners\n",
+			bw_info->params.WdiBw.NumThresh,
+			bw_info->params.WdiBw.info.Num);
+		goto free_cmd;
+	}
+
+free_cmd:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return res;
+}
+
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+	struct ipa_flt_rt_stats stats;
+	struct ipacm_fnr_info fnr_info;
+
+	memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+	if (!ipa_get_fnr_info(&fnr_info)) {
+		IPAERR("FNR counter haven't configured\n");
+		return -EINVAL;
+	}
+
+	/* update sw counters */
+	memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+	stats.num_bytes = info->sta_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		UL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to ul_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + UL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	stats.num_bytes = info->ap_tx;
+	if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+		DL_WLAN_TX, stats)) {
+		IPAERR("Failed to set stats to dl_wlan_tx %d\n",
+			fnr_info.sw_counter_offset + DL_WLAN_TX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ipa3_uc_send_enable_flow_control(uint16_t gsi_chid,
+		uint16_t redMarkerThreshold)
+{
+	int res;
+	union IpaEnablePipeMonitorCmdData_t cmd;
+
+	cmd.params.ipaProdGsiChid = gsi_chid;
+	cmd.params.redMarkerThreshold = redMarkerThreshold;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd((cmd.raw32b),
+		IPA_CPU_2_HW_CMD_ENABLE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail to enable flow ctrl for 0x%x\n",
+			cmd.params.ipaProdGsiChid);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_send_disable_flow_control(void)
+{
+	int res;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd(0,
+		IPA_CPU_2_HW_CMD_DISABLE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail to disable flow control\n");
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_send_update_flow_control(uint32_t bitmask,
+		 uint8_t  add_delete)
+{
+	int res;
+
+	if (bitmask == 0) {
+		IPAERR("Err update flow control, mask = 0\n");
+		return 0;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	res = ipa3_uc_send_cmd_64b_param(bitmask, add_delete,
+		IPA_CPU_2_HW_CMD_UPDATE_FLOW_CTL_MONITOR, 0,
+		false, 10 * HZ);
+
+	if (res)
+		IPAERR("fail flowCtrl update mask = 0x%x add_del = 0x%x\n",
+			bitmask, add_delete);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
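+
+/*
+ * Example flow (illustrative sketch; the channel id, threshold and bitmask
+ * values are hypothetical): enable red-marker monitoring on one producer
+ * GSI channel, add another channel to the monitored set, then disable
+ * monitoring:
+ *
+ *	ipa3_uc_send_enable_flow_control(5, 100);
+ *	ipa3_uc_send_update_flow_control(BIT(6), 1);
+ *	ipa3_uc_send_disable_flow_control();
+ */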

+ 957 - 0
ipa/ipa_v3/ipa_uc_mhi.c

@@ -0,0 +1,957 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION            0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS	2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS	2
+#define IPA_HW_MAX_CHANNEL_HANDLE	(IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
+ *	to serve MHI transfers. Once initialization was completed HW shall
 *	respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE carrying
+ *	state IPA_HW_MHI_CHANNEL_STATE_ENABLE.
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
+ *	processing state following host request. Once operation was completed
+ *	HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL
+ *	synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+	IPA_CPU_2_HW_CMD_MHI_INIT
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+	IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ *	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ *	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an
+ *	error in an element from the transfer ring associated with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a transport
+ *	interrupt was asserted when MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+	IPA_HW_CHANNEL_ERROR_NONE,
+	IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ *	secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+	IPA_HW_INVALID_MMIO_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_INVALID_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_HW_INVALID_EVENT_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_HW_NO_ED_IN_RING_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_HW_LINK_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located at offset zero of the SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ *	The state carries information regarding the error type.
+ *	See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u16 interfaceVersionMhi;
+	u8 mhiState;
+	u8 reserved_2B;
+	u8 mhiCnl0State;
+	u8 mhiCnl1State;
+	u8 mhiCnl2State;
+	u8 mhiCnl3State;
+	u8 mhiCnl4State;
+	u8 mhiCnl5State;
+	u8 mhiCnl6State;
+	u8 mhiCnl7State;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ *	(MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel.
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ *	event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+	u32 msiAddress;
+	u32 mmioBaseAddress;
+	u32 deviceMhiCtrlBaseAddress;
+	u32 deviceMhiDataBaseAddress;
+	u32 firstChannelIndex;
+	u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ *	command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ *	used as an index in channel context array structures.
+ * @bamPipeId: The IPA pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ *	type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+	struct IpaHwMhiInitChannelCmdParams_t {
+		u32 channelHandle:8;
+		u32 contexArrayIndex:8;
+		u32 bamPipeId:6;
+		u32 channelDirection:2;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ *	Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ *	rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+	struct IpaHwMhiChangeChannelStateCmdParams_t {
+		u32 requestedState:8;
+		u32 channelHandle:8;
+		u32 LPTransitionRejected:8;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+	struct IpaHwMhiStopEventUpdateDataParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ *	error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending transport descriptors
+ * currently queued
+ */
+union IpaHwMhiChangeChannelStateResponseData_t {
+	struct IpaHwMhiChangeChannelStateResponseParams_t {
+		u32 state:8;
+		u32 channelHandle:8;
+		u32 additonalParams:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+	struct IpaHwMhiChannelErrorEventParams_t {
+		u32 errorType:8;
+		u32 channelHandle:8;
+		u32 reserved:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+	struct IpaHwMhiChannelWakeupEventParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity triggered due to DL activity
+ * @numULTimerExpired: Number of times UL Accm Timer expired
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+	u32 numULDLSync;
+	u32 numULTimerExpired;
+	u32 numChEvCtxWpRead;
+	u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell int
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ *	Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ *	events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ *	after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ *	sending OOB and hitting OOB again before we processed threshold
+ *	number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+	u32 doorbellInt;
+	u32 reProccesed;
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamInt;
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 delayedMsi;
+	u32 immediateMsi;
+	u32 thresholdMsi;
+	u32 numSuspend;
+	u32 numResume;
+	u32 num_OOB;
+	u32 num_OOB_timer_expiry;
+	u32 num_OOB_moderation_timer_start;
+	u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+	struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+	struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ *	enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+	u8 isDlUlSyncEnabled;
+	u8 UlAccmVal;
+	u8 ulMsiEventThreshold;
+	u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4-byte alignment
+ * @reserved_1: Reserved byte for maintaining 4-byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+	u16 transferRingSize;
+	u8  transferRingIndex;
+	u8  eventRingIndex;
+	u8  bamPipeIndex;
+	u8  isOutChannel;
+	u8  reserved_0;
+	u8  reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4-byte alignment
+ * @reserved_1: Reserved byte for maintaining 4-byte alignment
+ * @reserved_2: Reserved byte for maintaining 4-byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+	u32 msiVec;
+	u16 intmodtValue;
+	u16 eventRingSize;
+	u8  eventRingIndex;
+	u8  reserved_0;
+	u8  reserved_1;
+	u8  reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+	struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+	struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+	struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+	struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+					IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+struct ipa3_uc_mhi_ctx {
+	u8 expected_responseOp;
+	u32 expected_responseParams;
+	void (*ready_cb)(void);
+	void (*wakeup_request_cb)(void);
+	u32 mhi_uc_stats_ofst;
+	struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx;
+
+static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio, u32 *uc_status)
+{
+	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+	if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp &&
+	    uc_sram_mmio->responseParams ==
+	    ipa3_uc_mhi_ctx->expected_responseParams) {
+		*uc_status = 0;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+{
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+		union IpaHwMhiChannelErrorEventData_t evt;
+
+		IPAERR("Channel error\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+			evt.params.errorType, evt.params.channelHandle,
+			evt.params.reserved);
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+		union IpaHwMhiChannelWakeupEventData_t evt;
+
+		IPADBG("WakeUp channel request\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("channelHandle=%d reserved=%d\n",
+			evt.params.channelHandle, evt.params.reserved);
+		ipa3_uc_mhi_ctx->wakeup_request_cb();
+	}
+}
+
+static void ipa3_uc_mhi_event_log_info_hdlr(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	struct Ipa3HwEventInfoData_t *evt_info_ptr;
+	u32 size;
+
+	if ((uc_event_top_mmio->protocolMask & (1 << IPA_HW_FEATURE_MHI))
+		== 0) {
+		IPAERR("MHI feature missing 0x%x\n",
+			uc_event_top_mmio->protocolMask);
+		return;
+	}
+
+	evt_info_ptr = &uc_event_top_mmio->statsInfo;
+	size = evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.size;
+	if (size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsMhiInfoData_t),
+			size);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_ofst =
+		evt_info_ptr->baseAddrOffset +
+		evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+	IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+	if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsMhiInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+			ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_uc_mhi_ctx->mhi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsMhiInfoData_t));
+	if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc mhi stats\n");
+		return;
+	}
+}
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+	struct ipa3_uc_hdlrs hdlrs;
+
+	if (ipa3_uc_mhi_ctx) {
+		IPAERR("Already initialized\n");
+		return -EFAULT;
+	}
+
+	ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL);
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	ipa3_uc_mhi_ctx->ready_cb = ready_cb;
+	ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+	memset(&hdlrs, 0, sizeof(hdlrs));
+	hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb;
+	hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr;
+	hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr;
+	hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr;
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+	IPADBG("Done\n");
+	return 0;
+}
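+
+/*
+ * Usage sketch (not part of the original source; callback names are
+ * hypothetical). An MHI client registers its callbacks once, before any
+ * other uC MHI call:
+ *
+ *	static void client_uc_ready(void) { ... }
+ *	static void client_wakeup_req(void) { ... }
+ *	...
+ *	if (ipa3_uc_mhi_init(client_uc_ready, client_wakeup_req))
+ *		IPAERR("uc mhi init failed\n");
+ */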
+
+void ipa3_uc_mhi_cleanup(void)
+{
+	struct ipa3_uc_hdlrs null_hdlrs = { 0 };
+
+	IPADBG("Enter\n");
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+		return;
+	}
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+	kfree(ipa3_uc_mhi_ctx);
+	ipa3_uc_mhi_ctx = NULL;
+
+	IPADBG("Done\n");
+}
+
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMhiInitCmdData_t *init_cmd_data;
+	struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_update_hw_flags(0);
+	if (res) {
+		IPAERR("ipa3_uc_update_hw_flags failed %d\n", res);
+		goto disable_clks;
+	}
+
+	mem.size = sizeof(*init_cmd_data);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+	init_cmd_data->msiAddress = msi->addr_low;
+	init_cmd_data->mmioBaseAddress = mmio_addr;
+	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+	init_cmd_data->firstChannelIndex = first_ch_idx;
+	init_cmd_data->firstEventRingIndex = first_evt_idx;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+		false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	mem.size = sizeof(*msi_cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+
+	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+	msi_cmd->msiAddress_hi = msi->addr_hi;
+	msi_cmd->msiAddress_low = msi->addr_low;
+	msi_cmd->msiData = msi->data;
+	msi_cmd->msiMask = msi->mask;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base,
+		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection)
+{
+	int res;
+	union IpaHwMhiInitChannelCmdData_t init_cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid ipa_ep_idx.\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&init_cmd, 0, sizeof(init_cmd));
+	init_cmd.params.channelHandle = channelHandle;
+	init_cmd.params.contexArrayIndex = contexArrayIndex;
+	init_cmd.params.bamPipeId = ipa_ep_idx;
+	init_cmd.params.channelDirection = channelDirection;
+
+	res = ipa3_uc_send_cmd(init_cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+
+int ipa3_uc_mhi_reset_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_suspend_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	cmd.params.channelHandle = channelHandle;
+	cmd.params.LPTransitionRejected = LPTransitionRejected;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	union IpaHwMhiStopEventUpdateData_t cmd;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.channelHandle = channelHandle;
+
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+		cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+		cmd->params.ulMsiEventThreshold,
+		cmd->params.dlMsiEventThreshold);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd(cmd->raw32b,
+		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int nBytes = 0;
+	int i;
+
+	if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("MHI uc stats is not valid\n");
+		return 0;
+	}
+
+	nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+		"Common Stats:\n");
+	PRINT_COMMON_STATS(numULDLSync);
+	PRINT_COMMON_STATS(numULTimerExpired);
+	PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+	for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+		nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+			"Channel %d Stats:\n", i);
+		PRINT_CHANNEL_STATS(i, doorbellInt);
+		PRINT_CHANNEL_STATS(i, reProccesed);
+		PRINT_CHANNEL_STATS(i, bamFifoFull);
+		PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+		PRINT_CHANNEL_STATS(i, bamInt);
+		PRINT_CHANNEL_STATS(i, ringFull);
+		PRINT_CHANNEL_STATS(i, ringEmpty);
+		PRINT_CHANNEL_STATS(i, ringUsageHigh);
+		PRINT_CHANNEL_STATS(i, ringUsageLow);
+		PRINT_CHANNEL_STATS(i, delayedMsi);
+		PRINT_CHANNEL_STATS(i, immediateMsi);
+		PRINT_CHANNEL_STATS(i, thresholdMsi);
+		PRINT_CHANNEL_STATS(i, numSuspend);
+		PRINT_CHANNEL_STATS(i, numResume);
+		PRINT_CHANNEL_STATS(i, num_OOB);
+		PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+		PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+		PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+	}
+
+	return nBytes;
+}

+ 635 - 0
ipa/ipa_v3/ipa_uc_ntn.c

@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_log_info_handler(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo;
+
+	if ((uc_event_top_mmio->protocolMask &
+		(1 << IPA_HW_PROTOCOL_ETH)) == 0) {
+		IPAERR("NTN protocol missing 0x%x\n",
+			uc_event_top_mmio->protocolMask);
+		return;
+	}
+
+	if (statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size !=
+		sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct Ipa3HwStatsNTNInfoData_t),
+			statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst =
+		uc_event_top_mmio->statsInfo.baseAddrOffset +
+		statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.offset;
+	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			   ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct Ipa3HwStatsNTNInfoData_t));
+	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from the uC
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
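+
+	/*
+	 * Expansion sketch (illustrative comment, not in the original
+	 * source): TX_STATS(num_db) copies the uC MMIO counter into the
+	 * caller's blob:
+	 *	stats->tx_ch_stats[0].num_db =
+	 *		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->
+	 *			tx_ch_stats[0].num_db;
+	 */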
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%pK ntn_stats=%pK\n",
+			stats,
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(ring_stats.ringFull);
+	TX_STATS(ring_stats.ringEmpty);
+	TX_STATS(ring_stats.ringUsageHigh);
+	TX_STATS(ring_stats.ringUsageLow);
+	TX_STATS(ring_stats.RingUtilCount);
+	TX_STATS(gsi_stats.bamFifoFull);
+	TX_STATS(gsi_stats.bamFifoEmpty);
+	TX_STATS(gsi_stats.bamFifoUsageHigh);
+	TX_STATS(gsi_stats.bamFifoUsageLow);
+	TX_STATS(gsi_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(ipa_pipe_number);
+
+	RX_STATS(num_pkts_processed);
+	RX_STATS(ring_stats.ringFull);
+	RX_STATS(ring_stats.ringEmpty);
+	RX_STATS(ring_stats.ringUsageHigh);
+	RX_STATS(ring_stats.ringUsageLow);
+	RX_STATS(ring_stats.RingUtilCount);
+	RX_STATS(gsi_stats.bamFifoFull);
+	RX_STATS(gsi_stats.bamFifoEmpty);
+	RX_STATS(gsi_stats.bamFifoUsageHigh);
+	RX_STATS(gsi_stats.bamFifoUsageLow);
+	RX_STATS(gsi_stats.bamUtilCount);
+	RX_STATS(num_db);
+	RX_STATS(num_qmb_int_handled);
+	RX_STATS(ipa_pipe_number);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+
+int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	int ret;
+
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return -ENXIO;
+	}
+
+	ret = ipa3_uc_state_check();
+	if (ret) {
+		ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+		ipa3_ctx->uc_ntn_ctx.priv = user_data;
+		return 0;
+	}
+
+	return -EEXIST;
+}
+
+void ipa3_ntn_uc_dereg_rdyCB(void)
+{
+	ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+	ipa3_ctx->uc_ntn_ctx.priv = NULL;
+}
+
+static void ipa3_uc_ntn_loaded_handler(void)
+{
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_ntn_ctx.uc_ready_cb(
+			ipa3_ctx->uc_ntn_ctx.priv);
+
+		ipa3_ctx->uc_ntn_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_ntn_ctx.priv = NULL;
+	}
+}
+
+int ipa3_ntn_init(void)
+{
+	struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_ntn_event_log_info_handler;
+	uc_ntn_cbs.ipa_uc_loaded_hdlr =
+		ipa3_uc_ntn_loaded_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+	struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data_v4_0;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+			&ntn_info->ring_base_pa);
+	IPADBG("ring_base_iova = 0x%pa\n",
+			&ntn_info->ring_base_iova);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("buff_pool_base_iova = 0x%pa\n", &ntn_info->buff_pool_base_iova);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		cmd.size = sizeof(*cmd_data_v4_0);
+	else
+		cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		cmd_data_v4_0 = (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)
+			cmd.base;
+		cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
+		Ntn_params = &cmd_data_v4_0->SetupCh_params.NtnSetupCh_params;
+	} else {
+		cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
+		Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	}
+
+	if (ntn_info->smmu_enabled) {
+		Ntn_params->ring_base_pa = (u32)ntn_info->ring_base_iova;
+		Ntn_params->buff_pool_base_pa =
+			(u32)ntn_info->buff_pool_base_iova;
+	} else {
+		Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+		Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	}
+
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params,
+	bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int result;
+	int i;
+	u64 iova;
+	phys_addr_t pa;
+	u64 iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+
+	if (params->data_buff_size > PAGE_SIZE) {
+		IPAERR("invalid data buff size\n");
+		return -EINVAL;
+	}
+
+	result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
+		PAGE_SIZE), map, IPA_SMMU_CB_UC);
+	if (result) {
+		IPAERR("failed to %s uC regs %d\n",
+			map ? "map" : "unmap", result);
+		goto fail;
+	}
+
+	if (params->smmu_enabled) {
+		IPADBG("smmu is enabled on EMAC\n");
+		result = ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
+			params->ntn_ring_size, map, params->ring_base_sgt,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s ntn ring %d\n",
+				map ? "map" : "unmap", result);
+			goto fail_map_ring;
+		}
+		result = ipa3_smmu_map_peer_buff(
+			(u64)params->buff_pool_base_iova,
+			params->num_buffers * 4, map,
+			params->buff_pool_base_sgt, IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s pool buffs %d\n",
+				map ? "map" : "unmap", result);
+			goto fail_map_buffer_smmu_enabled;
+		}
+	} else {
+		IPADBG("smmu is disabled on EMAC\n");
+		result = ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
+			params->ntn_ring_size, map, NULL, IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s ntn ring %d\n",
+				map ? "map" : "unmap", result);
+			goto fail_map_ring;
+		}
+		result = ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
+			params->num_buffers * 4, map, NULL, IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s pool buffs %d\n",
+				map ? "map" : "unmap", result);
+			goto fail_map_buffer_smmu_disabled;
+		}
+	}
+
+	if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		IPADBG("AP SMMU is set to s1 bypass\n");
+		return 0;
+	}
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < params->num_buffers; i++) {
+		iova = (u64)params->data_buff_list[i].iova;
+		pa = (phys_addr_t)params->data_buff_list[i].pa;
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, params->data_buff_size, iova_p,
+			pa_p, size_p);
+		IPADBG("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
+			"unmapping", iova_p, &pa_p, size_p);
+		if (map) {
+			result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
+				size_p, IOMMU_READ | IOMMU_WRITE);
+			if (result)
+				IPAERR("Fail to map 0x%llx\n", iova);
+		} else {
+			result = iommu_unmap(smmu_domain, iova_p, size_p);
+			if (result != params->data_buff_size)
+				IPAERR("Fail to unmap 0x%llx\n", iova);
+		}
+		if (result) {
+			if (params->smmu_enabled)
+				goto fail_map_data_buff_smmu_enabled;
+			else
+				goto fail_map_data_buff_smmu_disabled;
+		}
+	}
+	return 0;
+
+fail_map_data_buff_smmu_enabled:
+	ipa3_smmu_map_peer_buff((u64)params->buff_pool_base_iova,
+		params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
+	goto fail_map_buffer_smmu_enabled;
+fail_map_data_buff_smmu_disabled:
+	ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
+		params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
+	goto fail_map_buffer_smmu_disabled;
+fail_map_buffer_smmu_enabled:
+	ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
+		params->ntn_ring_size, !map, params->ring_base_sgt,
+		IPA_SMMU_CB_UC);
+	goto fail_map_ring;
+fail_map_buffer_smmu_disabled:
+	ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
+			params->ntn_ring_size, !map, NULL, IPA_SMMU_CB_UC);
+fail_map_ring:
+	ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
+		PAGE_SIZE), !map, IPA_SMMU_CB_UC);
+fail:
+	return result;
+}
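+
+/*
+ * Illustrative note on the page rounding above (values are hypothetical):
+ * with 4 KB pages, a buffer at iova 0x10001234 of size 0x100 is mapped as
+ * iova_p = 0x10001000, pa_p = the page-aligned pa, size_p = 0x1000, since
+ * the SMMU can only map whole pages. IPA_SMMU_ROUND_TO_PAGE is assumed to
+ * perform exactly this round-down of the addresses and round-up of the size.
+ */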
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	struct ipa3_ep_context *ep_ul;
+	struct ipa3_ep_context *ep_dl;
+	int ipa_ep_idx_ul;
+	int ipa_ep_idx_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	if (ipa_ep_idx_ul == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx_ul >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to alloc UL EP ipa_ep_idx_ul=%d\n",
+			ipa_ep_idx_ul);
+		return -EFAULT;
+	}
+
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_dl == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx_dl >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to alloc DL EP ipa_ep_idx_dl=%d\n",
+			ipa_ep_idx_dl);
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated ul:%d dl:%d\n",
+			   ep_ul->valid, ep_dl->valid);
+		return -EFAULT;
+	}
+
+	memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	result = ipa3_smmu_map_uc_ntn_pipes(&in->ul, true);
+	if (result) {
+		IPAERR("failed to map SMMU for UL %d\n", result);
+		goto fail;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
+			ipa_ep_idx_ul);
+		result = -EFAULT;
+		goto fail_smmu_unmap_ul;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail_disable_dp_ul;
+	}
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail_disable_dp_ul;
+	}
+
+	result = ipa3_smmu_map_uc_ntn_pipes(&in->dl, true);
+	if (result) {
+		IPAERR("failed to map SMMU for DL %d\n", result);
+		goto fail_disable_dp_ul;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail_smmu_unmap_dl;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail_disable_dp_dl;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+	return 0;
+
+fail_disable_dp_dl:
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+fail_smmu_unmap_dl:
+	ipa3_smmu_map_uc_ntn_pipes(&in->dl, false);
+fail_disable_dp_ul:
+	ipa3_disable_data_path(ipa_ep_idx_ul);
+fail_smmu_unmap_ul:
+	ipa3_smmu_map_uc_ntn_pipes(&in->ul, false);
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa3_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data_v4_0;
+	union Ipa3HwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	atomic_set(&ep_ul->disconnect_in_progress, 1);
+	atomic_set(&ep_dl->disconnect_in_progress, 1);
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		cmd.size = sizeof(*cmd_data_v4_0);
+	else
+		cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		cmd_data_v4_0 = (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)
+			cmd.base;
+		cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
+		tear = &cmd_data_v4_0->CommonCh_params.NtnCommonCh_params;
+	} else {
+		cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
+		tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	}
+
+	/* teardown the DL pipe */
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	/*
+	 * Reset the EP before sending the command; otherwise a disconnect
+	 * during data transfer would result in a flood of
+	 * suspend interrupts
+	 */
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* unmap the DL pipe */
+	result = ipa3_smmu_map_uc_ntn_pipes(&params->dl, false);
+	if (result) {
+		IPAERR("failed to unmap SMMU for DL %d\n", result);
+		goto fail;
+	}
+
+	/* teardown the UL pipe */
+	ipa3_disable_data_path(ipa_ep_idx_ul);
+
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* unmap the UL pipe */
+	result = ipa3_smmu_map_uc_ntn_pipes(&params->ul, false);
+	if (result) {
+		IPAERR("failed to unmap SMMU for UL %d\n", result);
+		goto fail;
+	}
+
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+fail:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}

+ 748 - 0
ipa/ipa_v3/ipa_uc_offload_i.h

@@ -0,0 +1,748 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+#define MAX_CH_STATS_SUPPORTED 5
+#define DIR_CONSUMER 0
+#define DIR_PRODUCER 1
+
+#define MAX_AQC_CHANNELS 2
+#define MAX_11AD_CHANNELS 5
+#define MAX_WDI2_CHANNELS 2
+#define MAX_WDI3_CHANNELS 2
+#define MAX_MHIP_CHANNELS 4
+#define MAX_USB_CHANNELS 2
+
+#define BW_QUOTA_MONITORING_MAX_ADDR_OFFSET 8
+#define BW_MONITORING_MAX_THRESHOLD 3
+/**
+ *  @brief   Enum value encoding: the feature an opcode belongs to
+ *           occupies the upper 3 bits and the opcode the lower 5 bits
+ *  +----------------+----------------+
+ *  |    3 bits      |     5 bits     |
+ *  +----------------+----------------+
+ *  |   HW_FEATURE   |     OPCODE     |
+ *  +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
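+
+/*
+ * Worked example of the encoding above:
+ *	FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3) = (0x1 << 5) | 3 = 0x23
+ *	EXTRACT_UC_FEATURE(0x23) = 0x23 >> 5 = 0x1 = IPA_HW_FEATURE_MHI
+ */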
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP : Feature related to ZIP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature covering several offload protocols in
+ *				IPA HW; use the protocol field to determine
+ *				which one (e.g. IPA_HW_PROTOCOL_11ad).
+ */
+enum ipa3_hw_features {
+	IPA_HW_FEATURE_COMMON		=	0x0,
+	IPA_HW_FEATURE_MHI		=	0x1,
+	IPA_HW_FEATURE_POWER_COLLAPSE	=	0x2,
+	IPA_HW_FEATURE_WDI		=	0x3,
+	IPA_HW_FEATURE_ZIP		=	0x4,
+	IPA_HW_FEATURE_NTN		=	0x5,
+	IPA_HW_FEATURE_OFFLOAD		=	0x6,
+	IPA_HW_FEATURE_MAX		=	IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa4_hw_protocol - Values that represent the protocols supported
+ * in IPA HW when using the IPA_HW_FEATURE_OFFLOAD feature.
+ * @IPA_HW_PROTOCOL_COMMON : protocol related to common operation of IPA HW
+ * @IPA_HW_PROTOCOL_AQC : protocol related to AQC operation in IPA HW
+ * @IPA_HW_PROTOCOL_11ad: protocol related to 11ad operation in IPA HW
+ * @IPA_HW_PROTOCOL_WDI : protocol related to WDI operation in IPA HW
+ * @IPA_HW_PROTOCOL_WDI3: protocol related to WDI3 operation in IPA HW
+ * @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
+ * @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
+ * @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
+ */
+enum ipa4_hw_protocol {
+	IPA_HW_PROTOCOL_COMMON = 0x0,
+	IPA_HW_PROTOCOL_AQC = 0x1,
+	IPA_HW_PROTOCOL_11ad = 0x2,
+	IPA_HW_PROTOCOL_WDI = 0x3,
+	IPA_HW_PROTOCOL_WDI3 = 0x4,
+	IPA_HW_PROTOCOL_ETH = 0x5,
+	IPA_HW_PROTOCOL_MHIP = 0x6,
+	IPA_HW_PROTOCOL_USB = 0x7,
+	IPA_HW_PROTOCOL_MAX
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specifying that a system error was
+ *  detected by the device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ * @IPA_HW_2_CPU_EVNT_RING_NOTIFY : Event to notify the APPS of a new event
+ *  ring element
+ */
+enum ipa3_hw_2_cpu_events {
+	IPA_HW_2_CPU_EVENT_NO_OP     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_EVENT_ERROR     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_EVENT_LOG_INFO  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_EVNT_RING_NOTIFY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error present
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE : GSI stop failed while
+ *  disabling the CONS pipe
+ * @IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE : GSI stop failed while
+ *  disabling the PROD pipe
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ * @IPA_HW_CONS_STOP_FAILURE : NTN/ETH CONS stop failed
+ * @IPA_HW_PROD_STOP_FAILURE : NTN/ETH PROD stop failed
+ */
+enum ipa3_hw_errors {
+	IPA_HW_ERROR_NONE              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_INVALID_DOORBELL_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_DMA_ERROR               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_FATAL_SYSTEM_ERROR      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_HW_INVALID_OPCODE          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_HW_INVALID_PARAMS        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_HW_CONS_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+	IPA_HW_PROD_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter, lower 32 bits. The parameter field
+ *		can hold 32 bits of immediate parameters or point to a
+ *		structure in system memory (in which case the address must be
+ *		accessible to HW)
+ * @cmdParams_hi : CPU->HW command parameter, higher 32 bits
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ *		32 bits of immediate parameters or point to a structure in
+ *		system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32
+ *		bits of immediate parameters or point to a structure in
+ *		system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ *				error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *						regarding non-fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ * @responseParams_1: offset addr for uC stats
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+	u8  cmdOp;
+	u8  reserved_01;
+	u16 reserved_03_02;
+	u32 cmdParams;
+	u32 cmdParams_hi;
+	u8  responseOp;
+	u8  reserved_0D;
+	u16 reserved_0F_0E;
+	u32 responseParams;
+	u8  eventOp;
+	u8  reserved_15;
+	u16 reserved_17_16;
+	u32 eventParams;
+	u32 firstErrorAddress;
+	u8  hwState;
+	u8  warningCounter;
+	u16 reserved_23_22;
+	u16 interfaceVersionCommon;
+	u16 reserved_27_26;
+	u32 responseParams_1;
+} __packed;
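+
+/*
+ * Layout check sketch (not in the original source): the reserved field
+ * names above encode byte offsets (reserved_01 at 0x01, reserved_0D at
+ * 0x0D, ...), so the packed mapping should span 0x2C (44) bytes:
+ *
+ *	BUILD_BUG_ON(sizeof(struct IpaHwSharedMemCommonMapping_t) != 0x2C);
+ */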
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+	struct IpaHwFeatureInfoParams_t {
+		u32 offset:16;
+		u32 size:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+	struct IpaHwErrorEventParams_t {
+		u32 errorType:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note    Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+	u32 baseAddrOffset;
+	union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @protocolMask : Mask indicating the protocols enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note    The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+	u32 protocolMask;
+	u32 circBuffBaseAddrOffset;
+	struct Ipa3HwEventInfoData_t statsInfo;
+	struct Ipa3HwEventInfoData_t configInfo;
+} __packed;
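+
+/*
+ * Address computation sketch (mirrors the event-log-info handlers in
+ * ipa_uc_mhi.c and ipa_uc_ntn.c above): a feature's stats live at
+ *
+ *	ipa_wrapper_base + statsInfo.baseAddrOffset +
+ *		statsInfo.featureInfo[feat].params.offset
+ *
+ * which the drivers ioremap() for the size of that feature's stats struct.
+ */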
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+	u32 ntn_uc_stats_ofst;
+	struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ *     Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+	IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_NTN_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_NTN_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_NTN_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error present
+ * @IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL: Write pointer update
+ *		failed in Tx ring
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ *		num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ *		failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: Rx cache is not empty
+ * @IPA_HW_NTN_CH_ERR_RESERVED: Reserved
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+	IPA_HW_NTN_CH_ERR_NONE            = 0,
+	IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_NTN_TX_FSM_ERROR           = 2,
+	IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL  = 3,
+	IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+	IPA_HW_NTN_RX_FSM_ERROR           = 5,
+	IPA_HW_NTN_RX_CACHE_NON_EMPTY     = 6,
+	IPA_HW_NTN_CH_ERR_RESERVED        = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t  - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ *  ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ *  buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ *  Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ *  Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ *  DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+	u32 ring_base_pa;
+	u32 buff_pool_base_pa;
+	u16 ntn_ring_size;
+	u16 num_buffers;
+	u32 ntn_reg_base_ptr_pa;
+	u8  ipa_pipe_number;
+	u8  dir;
+	u16 data_buff_size;
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear-down command
+ *
+ * @ipa_pipe_number: IPA pipe number. This can be either a Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+	struct IpaHwNtnCommonChCmdParams_t {
+		u32  ipa_pipe_number :8;
+		u32  reserved        :24;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
+ * information
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *
+ *@ring_stats: Transfer ring statistics
+ *@gsi_stats: GSI/BAM FIFO statistics
+ *@num_db: Number of times the doorbell was rung
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@ipa_pipe_number: The IPA Rx pipe number
+ */
+struct NTN3RxInfoData_t {
+	u32  num_pkts_processed;
+	struct IpaHwRingStats_t ring_stats;
+	struct IpaHwBamStats_t gsi_stats;
+	u32 num_db;
+	u32 num_qmb_int_handled;
+	u32 ipa_pipe_number;
+} __packed;
+
+
+/**
+ * struct NTN3TxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@ring_stats: Transfer ring statistics
+ *@gsi_stats: GSI/BAM FIFO statistics
+ *@num_db: Number of times the doorbell was rung
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@ipa_pipe_number: The IPA Tx pipe number
+ */
+struct NTN3TxInfoData_t {
+	u32  num_pkts_processed;
+	struct IpaHwRingStats_t ring_stats;
+	struct IpaHwBamStats_t gsi_stats;
+	u32 num_db;
+	u32 num_qmb_int_handled;
+	u32 ipa_pipe_number;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx/Tx
+ * channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+	struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+	struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands -  Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/ Rx Path
+ * @IPA_CPU_2_HW_CMD_PERIPHERAL_INIT : Command to initialize peripheral
+ * @IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT : Command to deinitialize peripheral
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC: Command to start the
+ * uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
+ * uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_QUOTA_MONITORING : Command to start the Quota monitoring
+ * @IPA_CPU_2_HW_CMD_BW_MONITORING : Command to start the BW monitoring
+ */
+enum ipa_cpu_2_hw_offload_commands {
+	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+	IPA_CPU_2_HW_CMD_PERIPHERAL_INIT =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+	IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_CPU_2_HW_CMD_QUOTA_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_CPU_2_HW_CMD_BW_MONITORING =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+};
+
+/**
+ * struct IpaHwOffloadStatsDeAllocCmdData_t - protocol info for
+ * uC stats stop
+ * @protocol: Enum that indicates the protocol type
+ */
+struct IpaHwOffloadStatsDeAllocCmdData_t {
+	uint32_t protocol;
+} __packed;
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ *			Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ *				be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+	IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+	IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+	IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+	IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * struct IpaHw11adSetupCmdData_t  - 11ad setup channel command data
+ * @dir: Direction RX/TX
+ * @wifi_ch: 11ad peripheral pipe number
+ * @gsi_ch: GSI Channel number
+ * @reserved: 1 byte of padding
+ * @wifi_hp_addr_lsb: Head/Tail pointer absolute address, lower 32 bits
+ * @wifi_hp_addr_msb: Head/Tail pointer absolute address, upper 32 bits
+ */
+struct IpaHw11adSetupCmdData_t {
+	u8 dir;
+	u8 wifi_ch;
+	u8 gsi_ch;
+	u8 reserved;
+	u32 wifi_hp_addr_lsb;
+	u32 wifi_hp_addr_msb;
+} __packed;
+
+
+/**
+ * struct IpaHw11adCommonChCmdData_t - 11ad tear down channel command data
+ * @gsi_ch: GSI Channel number
+ * @reserved_0: padding
+ * @reserved_1: padding
+ */
+struct IpaHw11adCommonChCmdData_t {
+	u8 gsi_ch;
+	u8 reserved_0;
+	u16 reserved_1;
+} __packed;
+
+/**
+ * struct IpaHw11adInitCmdData_t - 11ad peripheral init command data
+ * @periph_baddr_lsb: Peripheral Base Address LSB (pa/IOVA)
+ * @periph_baddr_msb: Peripheral Base Address MSB (pa/IOVA)
+ */
+struct IpaHw11adInitCmdData_t {
+	u32 periph_baddr_lsb;
+	u32 periph_baddr_msb;
+} __packed;
+
+/**
+ * struct IpaHw11adDeinitCmdData_t - 11ad peripheral deinit command data
+ * @reserved: Reserved for future
+ */
+struct IpaHw11adDeinitCmdData_t {
+	u32 reserved;
+};
+
+/**
+ * union IpaHwSetUpCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
+ */
+union IpaHwSetUpCmd {
+	struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+	struct IpaHw11adSetupCmdData_t	W11AdSetupCh_params;
+} __packed;
+
+struct IpaHwOffloadSetUpCmdData_t {
+	u8 protocol;
+	union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+struct IpaCommonMonitoringParams_t {
+	/* max 8 */
+	uint8_t  Num;
+	/* Sampling interval in ms */
+	uint8_t  Interval;
+	uint16_t Offset[BW_QUOTA_MONITORING_MAX_ADDR_OFFSET];
+} __packed; /* 18 bytes */
+
+struct IpaWdiQuotaMonitoringParams_t {
+	uint64_t Quota;
+	struct IpaCommonMonitoringParams_t info;
+} __packed;
+
+struct IpaWdiBwMonitoringParams_t {
+	uint64_t BwThreshold[BW_MONITORING_MAX_THRESHOLD];
+	struct IpaCommonMonitoringParams_t info;
+	uint8_t NumThresh;
+	/* Variable to start/stop BW monitoring */
+	uint8_t Stop;
+} __packed;
+
+union IpaQuotaMonitoringParams_t {
+	struct IpaWdiQuotaMonitoringParams_t WdiQM;
+} __packed;
+
+union IpaBwMonitoringParams_t {
+	struct IpaWdiBwMonitoringParams_t WdiBw;
+} __packed;
+
+struct IpaQuotaMonitoring_t {
+	/* indicates how the union below is to be interpreted */
+	uint32_t protocol;
+	union IpaQuotaMonitoringParams_t  params;
+} __packed;
+
+struct IpaBwMonitoring_t {
+	/* indicates how the union below is to be interpreted */
+	uint32_t protocol;
+	union IpaBwMonitoringParams_t   params;
+} __packed;
+
+
+struct IpaHwOffloadSetUpCmdData_t_v4_0 {
+	u32 protocol;
+	union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ */
+union IpaHwCommonChCmd {
+	union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+	struct IpaHw11adCommonChCmdData_t W11AdCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+	u8 protocol;
+	union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+enum EVENT_2_CPU_OPCODE {
+	BW_NOTIFY = 0x0,
+	QUOTA_NOTIFY = 0x1,
+};
+
+struct EventStructureBwMonitoring_t {
+	uint32_t ThresholdIndex;
+	uint64_t throughput;
+} __packed;
+
+struct EventStructureQuotaMonitoring_t {
+	/* indicates the threshold has been reached */
+	uint32_t ThreasholdReached;
+	uint64_t usage;
+} __packed;
+
+union EventParamFormat_t {
+	struct EventStructureBwMonitoring_t bw_param;
+	struct EventStructureQuotaMonitoring_t quota_param;
+} __packed;
+
+/* EVT RING STRUCTURE
+ *	| Word	| Bits	| Field     |
+ *	-----------------------------
+ *	| 0	| 0-7	| Protocol  |
+ *	|	| 8-15	| Reserved0 |
+ *	|	| 16-23	| Opcode    |
+ *	|	| 24-31	| Reserved1 |
+ *	| 1	| 0-31	| Word1     |
+ *	| 2	| 0-31	| Word2     |
+ *	| 3	| 0-31	| Word3     |
+ */
+struct eventElement_t {
+	uint8_t Protocol;
+	uint8_t Reserved0;
+	uint8_t Opcode;
+	uint8_t Reserved1;
+	union EventParamFormat_t Value;
+} __packed;
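+
+/*
+ * Decoding sketch (handle_bw()/handle_quota() are hypothetical consumers):
+ *
+ *	switch (ev->Opcode) {
+ *	case BW_NOTIFY:
+ *		handle_bw(ev->Value.bw_param.ThresholdIndex,
+ *			ev->Value.bw_param.throughput);
+ *		break;
+ *	case QUOTA_NOTIFY:
+ *		handle_quota(ev->Value.quota_param.ThreasholdReached,
+ *			ev->Value.quota_param.usage);
+ *		break;
+ *	}
+ */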
+
+struct IpaHwOffloadCommonChCmdData_t_v4_0 {
+	u32 protocol;
+	union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+
+/**
+ * union IpaHwPeripheralInitCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_PERIPHERAL_INIT
+ *
+ */
+union IpaHwPeripheralInitCmd {
+	struct IpaHw11adInitCmdData_t W11AdInit_params;
+} __packed;
+
+struct IpaHwPeripheralInitCmdData_t {
+	u32 protocol;
+	union IpaHwPeripheralInitCmd Init_params;
+} __packed;
+
+/**
+ * union IpaHwPeripheralDeinitCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT
+ *
+ */
+union IpaHwPeripheralDeinitCmd {
+	struct IpaHw11adDeinitCmdData_t W11AdDeinit_params;
+} __packed;
+
+struct IpaHwPeripheralDeinitCmdData_t {
+	u32 protocol;
+	union IpaHwPeripheralDeinitCmd PeripheralDeinit_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */

+ 3092 - 0
ipa/ipa_v3/ipa_uc_wdi.c

@@ -0,0 +1,3092 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include "ipa_qmi_service.h"
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
+
+#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
+
+#define IPA_AGGR_PKT_LIMIT 1
+#define IPA_AGGR_HARD_BYTE_LIMIT 2 /* 2 Kbytes aggregation hard byte limit */
+#define UPDATE_RI_MODERATION_THRESHOLD		8
+
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define GSI_STOP_MAX_RETRY_CNT 10
+
+struct ipa_wdi_res {
+	struct ipa_wdi_buffer_info *res;
+	unsigned int nents;
+	bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
+
+static void ipa3_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+	IPA_HW_2_CPU_EVENT_WDI_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+	IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+	IPA_HW_WDI_CHANNEL_STATE_RUNNING         = 3,
+	IPA_HW_WDI_CHANNEL_STATE_ERROR           = 4,
+	IPA_HW_WDI_CHANNEL_STATE_INVALID         = 0xFF
+};
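+
+/*
+ * Editor's note: a minimal, hypothetical helper (not in the original
+ * commit) that renders the channel states above as strings for the debug
+ * prints used later in this file.
+ */
+static inline const char *wdi_ch_state_str(u8 state)
+{
+	switch (state) {
+	case IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED:
+		return "INITED_DISABLED";
+	case IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND:
+		return "ENABLED_SUSPEND";
+	case IPA_HW_WDI_CHANNEL_STATE_RUNNING:
+		return "RUNNING";
+	case IPA_HW_WDI_CHANNEL_STATE_ERROR:
+		return "ERROR";
+	default:
+		return "INVALID";
+	}
+}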
+
+/**
+ * enum ipa_cpu_2_hw_wdi_commands - Values that represent the WDI commands
+ * from the CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+	IPA_CPU_2_HW_CMD_WDI_TX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_CPU_2_HW_CMD_WDI_RX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_CPU_2_HW_CMD_WDI_CH_ENABLE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_CPU_2_HW_CMD_WDI_CH_RESUME  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error specific to a channel
+ */
+enum ipa_hw_wdi_errors {
+	IPA_HW_WDI_ERROR_NONE    = 0,
+	IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI channel error types. This is
+ * present in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in
+ * Tx Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating the number of
+ * REs to fetch
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+	IPA_HW_WDI_CH_ERR_NONE                 = 0,
+	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_WDI_TX_FSM_ERROR                = 2,
+	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL       = 3,
+	IPA_HW_WDI_CH_ERR_RESERVED             = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and
+ * WDI section of the 128B shared memory located at offset zero of the SW
+ * Partition in IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u32 reserved_2B_28;
+	u32 reserved_2F_2C;
+	u32 reserved_33_30;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+	u16 interfaceVersionWdi;
+	u16 reserved_43_42;
+	u8  wdi_tx_ch_0_state;
+	u8  wdi_rx_ch_0_state;
+	u16 reserved_47_46;
+} __packed;
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring have to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent via a pointer and thus should reside in memory
+ * accessible to the HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
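+
+/*
+ * Editor's note (illustrative, not part of this commit): because these
+ * set-up parameters are passed by pointer, the command payload is placed in
+ * DMA-coherent memory before the command is issued, as
+ * ipa3_connect_wdi_pipe() does later in this file:
+ *
+ *	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ *			&cmd.phys_base, GFP_KERNEL);
+ */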
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through
+ * which the IPA uC communicates the read pointer into the Rx ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent via a pointer and thus should reside in memory
+ * accessible to the HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u8  ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8  ipa_pipe_number;
+} __packed;
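+
+/*
+ * Editor's note: the WDI2 variants extend the ring and pointer addresses to
+ * 64 bits via the extra *_hi words; a 64-bit address is split exactly as
+ * done later in ipa3_connect_wdi_pipe(), e.g.:
+ *
+ *	rx_2->rx_ring_base_pa_hi = (u32)((va & 0xFFFFFFFF00000000) >> 32);
+ *	rx_2->rx_ring_base_pa = (u32)(va & 0xFFFFFFFF);
+ */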
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+	struct IpaHwWdiRxExtCfgCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 qmap_id:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
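+
+/*
+ * Editor's illustrative sketch (hypothetical variables): immediate-parameter
+ * unions like the one above are filled through the bit-field view and sent
+ * as the raw 32-bit word:
+ *
+ *	union IpaHwWdiRxExtCfgCmdData_t cfg = { .raw32b = 0 };
+ *
+ *	cfg.params.ipa_pipe_number = ipa_ep_idx;
+ *	cfg.params.qmap_id = qmap_id;
+ *	(cfg.raw32b now carries both fields as the command payload)
+ */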
+
+/**
+ * union IpaHwWdiCommonChCmdData_t - Union holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number :  The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+	struct IpaHwWdiCommonChCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - Parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : Type of WDI error that occurred (see enum
+ * ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which the error has happened.
+ * Applicable only if the error type indicates a channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+	struct IpaHwWdiErrorEventParams_t {
+		u32 wdi_error_type:8;
+		u32 reserved:8;
+		u32 ipa_pipe_number:8;
+		u32 wdi_ch_err_type:8;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+static void ipa3_uc_wdi_event_log_info_handler(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	struct Ipa3HwEventInfoData_t *stats_ptr = &uc_event_top_mmio->statsInfo;
+
+	if ((uc_event_top_mmio->protocolMask &
+		(1 << IPA_HW_PROTOCOL_WDI)) == 0) {
+		IPAERR("WDI protocol missing 0x%x\n",
+			uc_event_top_mmio->protocolMask);
+		return;
+	}
+
+	if (stats_ptr->featureInfo[IPA_HW_PROTOCOL_WDI].params.size !=
+		sizeof(struct IpaHwStatsWDIInfoData_t)) {
+		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsWDIInfoData_t),
+			stats_ptr->featureInfo[
+				IPA_HW_PROTOCOL_WDI].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst =
+		stats_ptr->baseAddrOffset +
+		stats_ptr->featureInfo[IPA_HW_PROTOCOL_WDI].params.offset;
+	IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+	if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsWDIInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsWDIInfoData_t));
+	if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc wdi stats\n");
+		return;
+	}
+}
+
+static void ipa3_uc_wdi_event_handler(
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+	union IpaHwWdiErrorEventData_t wdi_evt;
+	struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+		wdi_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+			wdi_evt.params.wdi_error_type,
+			wdi_evt.params.ipa_pipe_number,
+			wdi_evt.params.wdi_ch_err_type);
+		wdi_sram_mmio_ext =
+			(struct IpaHwSharedMemWdiMapping_t *)
+			uc_sram_mmio;
+		IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+			wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+			wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+	}
+}
+
+/**
+ * ipa3_get_wdi_gsi_stats() - Query WDI GSI stats from the uC
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad NULL parms for wdi_gsi_stats\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_WDI2_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
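+
+/*
+ * Editor's usage sketch (hypothetical caller, not in the original commit):
+ * a debugfs-style dump built on ipa3_get_wdi_gsi_stats() could look like:
+ *
+ *	struct ipa_uc_dbg_ring_stats stats;
+ *	int i;
+ *
+ *	if (!ipa3_get_wdi_gsi_stats(&stats))
+ *		for (i = 0; i < MAX_WDI2_CHANNELS; i++)
+ *			pr_info("ch%d full=%u empty=%u\n", i,
+ *				stats.ring[i].ringFull,
+ *				stats.ring[i].ringEmpty);
+ */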
+
+/**
+ * ipa3_get_wdi_stats() - Query WDI statistics from the uC
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+	if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("bad parms stats=%pK wdi_stats=%pK\n",
+			stats,
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(copy_engine_doorbell_value);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_ic_inj_vdev_change);
+	RX_STATS(num_ic_inj_fw_desc_change);
+	RX_STATS(num_qmb_int_handled);
+	RX_STATS(reserved1);
+	RX_STATS(reserved2);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+int ipa3_wdi_init(void)
+{
+	struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 };
+
+	uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler;
+	uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_wdi_event_log_info_handler;
+	uc_wdi_cbs.ipa_uc_loaded_hdlr =
+		ipa3_uc_wdi_loaded_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+	return 0;
+}
+
+static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	if (len > PAGE_SIZE)
+		va = roundup(cb->next_addr, len);
+
+	ret = ipa3_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_MMIO) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
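+
+/*
+ * Editor's worked example for the page-rounding math above (illustrative,
+ * assuming a 4 KB PAGE_SIZE): for pa = 0x12345678 and len = 0x100,
+ * rounddown(pa, PAGE_SIZE) = 0x12345000, so the mapping must also cover the
+ * 0x678 bytes of page offset: true_len = roundup(0x100 + 0x678, 0x1000) =
+ * 0x1000, and the returned iova = va + 0x678 points at the original,
+ * unaligned physical address.
+ */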
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	ret = ipa3_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_MMIO) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
+
+static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret, i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len = 0;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		len += PAGE_ALIGN(sg->offset + sg->length);
+	}
+
+	if (len > PAGE_SIZE) {
+		va = roundup(cb->next_addr,
+				roundup_pow_of_two(len));
+		start_iova = va;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->iommu_domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->iommu_domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret;
+	int i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->iommu_domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->iommu_domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
+static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int i, j, start, end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_TX_DB_RES;
+		else
+			end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2 ||
+			ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->iommu_domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
+	int i, j, start, end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->iommu_domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+		unsigned long iova, size_t len)
+{
+	IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+		&pa, iova, len);
+	wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res),
+		GFP_KERNEL);
+	if (!wdi_res[res_idx].res) {
+		WARN_ON(1);
+		return;
+	}
+	wdi_res[res_idx].nents = 1;
+	wdi_res[res_idx].valid = true;
+	wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+	wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+	wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+				PAGE_SIZE), PAGE_SIZE);
+	IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+			wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+		unsigned long iova)
+{
+	int i;
+	struct scatterlist *sg;
+	unsigned long curr_iova = iova;
+
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return;
+	}
+
+	wdi_res[res_idx].res = kcalloc(sgt->nents,
+		sizeof(*wdi_res[res_idx].res),
+			GFP_KERNEL);
+	if (!wdi_res[res_idx].res) {
+		WARN_ON(1);
+		return;
+	}
+	wdi_res[res_idx].nents = sgt->nents;
+	wdi_res[res_idx].valid = true;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan */
+		wdi_res[res_idx].res[i].pa = sg->dma_address;
+		wdi_res[res_idx].res[i].iova = curr_iova;
+		wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+				sg->length);
+		IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res[i].pa,
+			wdi_res[res_idx].res[i].iova,
+			wdi_res[res_idx].res[i].size);
+		curr_iova += wdi_res[res_idx].res[i].size;
+	}
+}
+
+int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova)
+{
+	/* SMMU on WLAN but no SMMU on IPA is an unsupported pairing */
+	if (wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		IPAERR("Unsupported SMMU pairing\n");
+		return -EINVAL;
+	}
+
+	/* legacy: no SMMUs on either end */
+	if (!wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		*iova = pa;
+		return 0;
+	}
+
+	/* no SMMU on WLAN but SMMU on IPA */
+	if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		if (ipa_create_uc_smmu_mapping_pa(pa, len,
+			res_idx == IPA_WDI_CE_DB_RES, iova)) {
+			IPAERR("Fail to create mapping res %d\n", res_idx);
+			return -EFAULT;
+		}
+		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+		return 0;
+	}
+
+	/* SMMU on WLAN and SMMU on IPA */
+	if (wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		switch (res_idx) {
+		case IPA_WDI_RX_RING_RP_RES:
+		case IPA_WDI_RX_COMP_RING_WP_RES:
+		case IPA_WDI_CE_DB_RES:
+		case IPA_WDI_TX_DB_RES:
+			if (ipa_create_uc_smmu_mapping_pa(pa, len,
+				res_idx == IPA_WDI_CE_DB_RES,
+				iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+			break;
+		case IPA_WDI_RX_RING_RES:
+		case IPA_WDI_RX_COMP_RING_RES:
+		case IPA_WDI_TX_RING_RES:
+		case IPA_WDI_CE_RING_RES:
+			if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				WARN_ON(1);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+
+	return 0;
+}
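+
+/*
+ * Editor's usage sketch (hypothetical variables): mapping a 4-byte doorbell
+ * register for the uC, as done for IPA_WDI_CE_DB_RES in the connect path
+ * below:
+ *
+ *	unsigned long db_iova;
+ *
+ *	if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, wlan_smmu_en,
+ *			db_pa, NULL, 4, true, &db_iova))
+ *		return -EFAULT;
+ *	(db_iova is the address the uC should be told to write to)
+ */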
+
+void ipa3_release_wdi3_gsi_smmu_mappings(u8 dir)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int i, j, start, end;
+
+	if (dir == IPA_WDI3_TX_DIR) {
+		start = IPA_WDI_TX_RING_RES;
+		end = IPA_WDI_TX_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		end = IPA_WDI_RX_COMP_RING_WP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->iommu_domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
+int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova)
+{
+	/* SMMU on WLAN but no SMMU on IPA is an unsupported pairing */
+	if (wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		IPAERR("Unsupported SMMU pairing\n");
+		return -EINVAL;
+	}
+
+	/* legacy: no SMMUs on either end */
+	if (!wlan_smmu_en && ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		*iova = pa;
+		return 0;
+	}
+
+	/* no SMMU on WLAN but SMMU on IPA */
+	if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				res_idx == IPA_WDI_CE_DB_RES, iova)) {
+			IPAERR("Fail to create mapping res %d\n",
+					res_idx);
+			return -EFAULT;
+		}
+		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+		return 0;
+	}
+
+	/* SMMU on WLAN and SMMU on IPA */
+	if (wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		switch (res_idx) {
+		case IPA_WDI_RX_RING_RP_RES:
+		case IPA_WDI_RX_COMP_RING_WP_RES:
+		case IPA_WDI_CE_DB_RES:
+		case IPA_WDI_TX_DB_RES:
+			if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				res_idx == IPA_WDI_CE_DB_RES,
+				iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+			break;
+		case IPA_WDI_RX_RING_RES:
+		case IPA_WDI_RX_COMP_RING_RES:
+		case IPA_WDI_TX_RING_RES:
+		case IPA_WDI_CE_RING_RES:
+			if (ipa_create_ap_smmu_mapping_sgt(sgt, iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+	return 0;
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static int ipa3_wdi2_gsi_alloc_evt_ring(
+			struct gsi_evt_ring_props *evt_ring_props,
+			enum ipa_client_type client,
+			unsigned long *evt_ring_hdl)
+{
+	union __packed gsi_evt_scratch evt_scratch;
+	int result = -EFAULT;
+
+	/* GSI EVENT RING allocation */
+	evt_ring_props->intf = GSI_EVT_CHTYPE_WDI2_EV;
+	evt_ring_props->intr = GSI_INTR_IRQ;
+
+	if (IPA_CLIENT_IS_PROD(client))
+		evt_ring_props->re_size = GSI_EVT_RING_RE_SIZE_8B;
+	else
+		evt_ring_props->re_size = GSI_EVT_RING_RE_SIZE_16B;
+
+	evt_ring_props->exclusive = true;
+	evt_ring_props->err_cb = ipa_gsi_evt_ring_err_cb;
+	evt_ring_props->user_data = NULL;
+	evt_ring_props->int_modt = IPA_GSI_EVT_RING_INT_MODT;
+	evt_ring_props->int_modc = 1;
+	IPADBG("GSI evt ring len: %d\n", evt_ring_props->ring_len);
+	IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
+			client,
+			evt_ring_props->int_modt,
+			evt_ring_props->int_modc);
+
+	result = gsi_alloc_evt_ring(evt_ring_props,
+			ipa3_ctx->gsi_dev_hdl, evt_ring_hdl);
+	IPADBG("gsi_alloc_evt_ring result: %d\n", result);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_alloc_evt_ring;
+
+	evt_scratch.wdi.update_ri_moderation_config =
+				UPDATE_RI_MODERATION_THRESHOLD;
+	evt_scratch.wdi.update_ri_mod_timer_running = 0;
+	evt_scratch.wdi.evt_comp_count = 0;
+	evt_scratch.wdi.last_update_ri = 0;
+	evt_scratch.wdi.resvd1 = 0;
+	evt_scratch.wdi.resvd2 = 0;
+	result = gsi_write_evt_ring_scratch(*evt_ring_hdl, evt_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing WDI event ring scratch: %d\n", result);
+		gsi_dealloc_evt_ring(*evt_ring_hdl);
+		return -EFAULT;
+	}
+
+fail_alloc_evt_ring:
+	return result;
+}
+
+static int ipa3_wdi2_gsi_alloc_channel_ring(
+				struct gsi_chan_props *channel_props,
+				enum ipa_client_type client,
+				unsigned long *chan_hdl,
+				unsigned long evt_ring_hdl)
+{
+	int result = -EFAULT;
+	const struct ipa_gsi_ep_config *ep_cfg;
+
+	ep_cfg = ipa3_get_gsi_ep_info(client);
+	if (!ep_cfg) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+				client);
+		return -EPERM;
+	}
+
+	if (IPA_CLIENT_IS_PROD(client)) {
+		IPAERR("Client is PROD\n");
+		channel_props->dir = GSI_CHAN_DIR_TO_GSI;
+		channel_props->re_size = GSI_CHAN_RE_SIZE_16B;
+	} else {
+		IPAERR("Client is CONS\n");
+		channel_props->dir = GSI_CHAN_DIR_FROM_GSI;
+		channel_props->re_size = GSI_CHAN_RE_SIZE_8B;
+	}
+
+	channel_props->prot = GSI_CHAN_PROT_WDI2;
+	channel_props->ch_id = ep_cfg->ipa_gsi_chan_num;
+	channel_props->evt_ring_hdl = evt_ring_hdl;
+
+	IPADBG("ch_id: %d\n", channel_props->ch_id);
+	IPADBG("evt_ring_hdl: %ld\n", channel_props->evt_ring_hdl);
+	IPADBG("re_size: %d\n", channel_props->re_size);
+	IPADBG("Config GSI xfer cb func\n");
+	IPADBG("GSI channel ring len: %d\n", channel_props->ring_len);
+	channel_props->xfer_cb = NULL;
+
+	IPADBG("channel ring base vaddr = 0x%pK\n",
+			channel_props->ring_base_vaddr);
+
+	channel_props->use_db_eng = GSI_CHAN_DB_MODE;
+	channel_props->max_prefetch = GSI_ONE_PREFETCH_SEG;
+	channel_props->prefetch_mode = ep_cfg->prefetch_mode;
+	channel_props->low_weight = 1;
+	channel_props->err_cb = ipa_gsi_chan_err_cb;
+
+	IPADBG("Allocating GSI channel\n");
+	result =  gsi_alloc_channel(channel_props,
+			ipa3_ctx->gsi_dev_hdl,
+			chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_alloc_channel;
+
+	IPADBG("gsi_chan_hdl: %ld\n", *chan_hdl);
+
+fail_alloc_channel:
+	return result;
+}
+
+int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
+	struct ipa_wdi_out_params *out)
+{
+	u32 len;
+	int ipa_ep_idx, num_ring_ele;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	struct gsi_chan_props gsi_channel_props;
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	union __packed gsi_channel_scratch gsi_scratch;
+	phys_addr_t pa;
+	unsigned long va;
+	unsigned long wifi_rx_ri_addr = 0;
+	u32 gsi_db_reg_phs_addr_lsb;
+	u32 gsi_db_reg_phs_addr_msb;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+
+	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (in->smmu_enabled) {
+			IPADBG("comp_ring_size=%d\n",
+				in->u.dl_smmu.comp_ring_size);
+			IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl_smmu.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n",
+				in->u.dl_smmu.num_tx_buffers);
+		} else {
+			IPADBG("comp_ring_base_pa=0x%pa\n",
+					&in->u.dl.comp_ring_base_pa);
+			IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+			IPADBG("ce_ring_base_pa=0x%pa\n",
+				&in->u.dl.ce_ring_base_pa);
+			IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		}
+	} else {
+		if (in->smmu_enabled) {
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul_smmu.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul_smmu.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_comp_ring_wp_pa);
+			ipa3_ctx->wdi2_ctx.rdy_ring_rp_pa =
+				in->u.ul_smmu.rdy_ring_rp_pa;
+			ipa3_ctx->wdi2_ctx.rdy_ring_size =
+				in->u.ul_smmu.rdy_ring_size;
+			ipa3_ctx->wdi2_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul_smmu.rdy_comp_ring_wp_pa;
+			ipa3_ctx->wdi2_ctx.rdy_comp_ring_size =
+				in->u.ul_smmu.rdy_comp_ring_size;
+		} else {
+			IPADBG("rx_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_base_pa);
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_base_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_wp_pa);
+			ipa3_ctx->wdi2_ctx.rdy_ring_base_pa =
+				in->u.ul.rdy_ring_base_pa;
+			ipa3_ctx->wdi2_ctx.rdy_ring_rp_pa =
+				in->u.ul.rdy_ring_rp_pa;
+			ipa3_ctx->wdi2_ctx.rdy_ring_size =
+				in->u.ul.rdy_ring_size;
+			ipa3_ctx->wdi2_ctx.rdy_comp_ring_base_pa =
+				in->u.ul.rdy_comp_ring_base_pa;
+			ipa3_ctx->wdi2_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul.rdy_comp_ring_wp_pa;
+			ipa3_ctx->wdi2_ctx.rdy_comp_ring_size =
+				in->u.ul.rdy_comp_ring_size;
+		}
+	}
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+			in->u.dl.comp_ring_size;
+		IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.comp_ring_size,
+				in->u.dl.comp_ring_size);
+		if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
+					len,
+					false,
+					&va)) {
+			IPAERR("fail to create gsi mapping TX ring.\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_channel_props.ring_base_addr = va;
+		gsi_channel_props.ring_base_vaddr = NULL;
+		gsi_channel_props.ring_len = len;
+
+		len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+			in->u.dl.ce_ring_size;
+		IPADBG("CE ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.ce_ring_size,
+				in->u.dl.ce_ring_size);
+
+		/* WA: wlan passed ce_ring sg_table PA directly */
+		if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.ce_ring_base_pa,
+					&in->u.dl_smmu.ce_ring,
+					len,
+					false,
+					&va)) {
+			IPAERR("fail to create gsi mapping CE ring.\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_evt_ring_props.ring_base_addr = va;
+		gsi_evt_ring_props.ring_base_vaddr = NULL;
+		gsi_evt_ring_props.ring_len = len;
+		pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+			in->u.dl.ce_door_bell_pa;
+		if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
+					in->smmu_enabled,
+					pa,
+					NULL,
+					4,
+					true,
+					&va)) {
+			IPAERR("fail to create gsi mapping CE DB.\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_evt_ring_props.rp_update_addr = va;
+	} else {
+		len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+			in->u.ul.rdy_ring_size;
+		IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_ring_size,
+				in->u.ul.rdy_ring_size);
+		if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RES,
+					in->smmu_enabled,
+					in->u.ul.rdy_ring_base_pa,
+					&in->u.ul_smmu.rdy_ring,
+					len,
+					false,
+					&va)) {
+			IPAERR("fail to create gsi RX ring.\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_channel_props.ring_base_addr = va;
+		gsi_channel_props.ring_base_vaddr = NULL;
+		gsi_channel_props.ring_len = len;
+		pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+			in->u.ul.rdy_ring_rp_pa;
+		if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+					in->smmu_enabled,
+					pa,
+					NULL,
+					4,
+					false,
+					&wifi_rx_ri_addr)) {
+			IPAERR("fail to create gsi RX rng RP\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		len = in->smmu_enabled ?
+			in->u.ul_smmu.rdy_comp_ring_size :
+			in->u.ul.rdy_comp_ring_size;
+		IPADBG("RX ring smmu_en=%d comp_ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_comp_ring_size,
+				in->u.ul.rdy_comp_ring_size);
+		if (ipa_create_gsi_smmu_mapping(
+					IPA_WDI_RX_COMP_RING_RES,
+					in->smmu_enabled,
+					in->u.ul.rdy_comp_ring_base_pa,
+					&in->u.ul_smmu.rdy_comp_ring,
+					len,
+					false,
+					&va)) {
+			IPAERR("fail to create gsi RX comp_ring.\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_evt_ring_props.ring_base_addr = va;
+		gsi_evt_ring_props.ring_base_vaddr = NULL;
+		gsi_evt_ring_props.ring_len = len;
+		pa = in->smmu_enabled ?
+			in->u.ul_smmu.rdy_comp_ring_wp_pa :
+			in->u.ul.rdy_comp_ring_wp_pa;
+		if (ipa_create_gsi_smmu_mapping(
+					IPA_WDI_RX_COMP_RING_WP_RES,
+					in->smmu_enabled,
+					pa,
+					NULL,
+					4,
+					false,
+					&va)) {
+			IPAERR("fail to create gsi RX comp_rng WP\n");
+			result = -ENOMEM;
+			goto gsi_timeout;
+		}
+		gsi_evt_ring_props.rp_update_addr = va;
+	}
+
+	ep->valid = 1;
+	ep->client = in->sys.client;
+	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+	ep->client_notify = in->sys.notify;
+	ep->priv = in->sys.priv;
+	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		in->sys.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+		in->sys.ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+		in->sys.ipa_ep_cfg.aggr.aggr_pkt_limit = IPA_AGGR_PKT_LIMIT;
+		in->sys.ipa_ep_cfg.aggr.aggr_byte_limit =
+						IPA_AGGR_HARD_BYTE_LIMIT;
+		in->sys.ipa_ep_cfg.aggr.aggr_hard_byte_limit_en =
+						IPA_ENABLE_AGGR;
+	}
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+	result = ipa3_wdi2_gsi_alloc_evt_ring(&gsi_evt_ring_props,
+				in->sys.client,
+				&ep->gsi_evt_ring_hdl);
+	if (result)
+		goto fail_alloc_evt_ring;
+	/* copy mem info */
+	ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr = gsi_evt_ring_props.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr =
+				gsi_evt_ring_props.ring_base_vaddr;
+	IPAERR("evt ring len: %d\n", ep->gsi_mem_info.evt_ring_len);
+	IPAERR("element size: %d\n", gsi_evt_ring_props.re_size);
+
+	result = ipa3_wdi2_gsi_alloc_channel_ring(&gsi_channel_props,
+					in->sys.client,
+				&ep->gsi_chan_hdl, ep->gsi_evt_ring_hdl);
+	if (result)
+		goto fail_alloc_channel;
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr = gsi_channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		gsi_channel_props.ring_base_vaddr;
+
+	num_ring_ele = ep->gsi_mem_info.evt_ring_len /
+		gsi_evt_ring_props.re_size;
+	IPAERR("evt ring num elements: %d\n", num_ring_ele);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+			gsi_scratch.wdi.wifi_rx_ri_addr_low =
+				wifi_rx_ri_addr & 0xFFFFFFFF;
+			gsi_scratch.wdi.wifi_rx_ri_addr_high =
+				(wifi_rx_ri_addr & 0xFFFFF00000000) >> 32;
+			gsi_scratch.wdi.wdi_rx_vdev_id = 0xff;
+			gsi_scratch.wdi.wdi_rx_fw_desc = 0xff;
+			gsi_scratch.wdi.endp_metadatareg_offset =
+						ipahal_get_reg_mn_ofst(
+						IPA_ENDP_INIT_HDR_METADATA_n, 0,
+								ipa_ep_idx)/4;
+			gsi_scratch.wdi.qmap_id = 0;
+		}
+		gsi_scratch.wdi.update_ri_moderation_threshold =
+			min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele);
+		gsi_scratch.wdi.update_ri_moderation_counter = 0;
+		gsi_scratch.wdi.wdi_rx_tre_proc_in_progress = 0;
+	} else {
+		if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+			gsi_scratch.wdi2_new.wifi_rx_ri_addr_low =
+				wifi_rx_ri_addr & 0xFFFFFFFF;
+			gsi_scratch.wdi2_new.wifi_rx_ri_addr_high =
+				(wifi_rx_ri_addr & 0xFFFFF00000000) >> 32;
+			gsi_scratch.wdi2_new.wdi_rx_vdev_id = 0xff;
+			gsi_scratch.wdi2_new.wdi_rx_fw_desc = 0xff;
+			gsi_scratch.wdi2_new.endp_metadatareg_offset =
+						ipahal_get_reg_mn_ofst(
+						IPA_ENDP_INIT_HDR_METADATA_n, 0,
+								ipa_ep_idx)/4;
+			gsi_scratch.wdi2_new.qmap_id = 0;
+		}
+		gsi_scratch.wdi2_new.update_ri_moderation_threshold =
+			min(UPDATE_RI_MODERATION_THRESHOLD, num_ring_ele);
+		gsi_scratch.wdi2_new.update_ri_moderation_counter = 0;
+		gsi_scratch.wdi2_new.wdi_rx_tre_proc_in_progress = 0;
+	}
+
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+			gsi_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_write_channel_scratch failed %d\n",
+				result);
+		goto fail_write_channel_scratch;
+	}
+
+	/* for AP+STA stats update */
+	if (in->wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+	else
+		IPADBG("in->wdi_notify is null\n");
+
+	ipa3_enable_data_path(ipa_ep_idx);
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	IPADBG("GSI connected.\n");
+	gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+			&gsi_db_reg_phs_addr_lsb,
+			&gsi_db_reg_phs_addr_msb);
+	out->uc_door_bell_pa = gsi_db_reg_phs_addr_lsb;
+	IPADBG("GSI query result: %d\n", gsi_res);
+	IPADBG("GSI lsb addr: 0x%x\n", gsi_db_reg_phs_addr_lsb);
+	IPADBG("GSI msb addr: 0x%x\n", gsi_db_reg_phs_addr_msb);
+
+	ep->gsi_offload_state |= IPA_WDI_CONNECTED;
+	out->clnt_hdl = ipa_ep_idx;
+	return 0;
+
+fail_write_channel_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_channel:
+	if (ep->gsi_evt_ring_hdl != ~0) {
+		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = ~0;
+	}
+fail_alloc_evt_ring:
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+gsi_timeout:
+	ipa_release_ap_smmu_mappings(in->sys.client);
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+	return result;
+}
+
+/**
+ * ipa3_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwWdiTxSetUpCmdData_t *tx;
+	struct IpaHwWdiRxSetUpCmdData_t *rx;
+	struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+	struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
+
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	unsigned long va;
+	phys_addr_t pa;
+	u32 len;
+
+	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. in=%pK out=%pK\n", in, out);
+		if (in)
+			IPAERR("client = %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (!in->smmu_enabled) {
+		if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+			if (in->u.dl.comp_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT ||
+				in->u.dl.ce_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on TX\n");
+				return -EINVAL;
+			}
+		} else {
+			if (in->u.ul.rdy_ring_base_pa %
+				IPA_WDI_RING_ALIGNMENT) {
+				IPAERR("alignment failure on RX\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_connect_gsi_wdi_pipe(in, out);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*tx_2);
+		else
+			cmd.size = sizeof(*tx);
+		if (in->smmu_enabled) {
+			IPADBG("comp_ring_size=%d\n",
+				in->u.dl_smmu.comp_ring_size);
+			IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl_smmu.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n",
+				in->u.dl_smmu.num_tx_buffers);
+		} else {
+			IPADBG("comp_ring_base_pa=0x%pa\n",
+					&in->u.dl.comp_ring_base_pa);
+			IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+			IPADBG("ce_ring_base_pa=0x%pa\n",
+				&in->u.dl.ce_ring_base_pa);
+			IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+			IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+					&in->u.dl.ce_door_bell_pa);
+			IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+		}
+	} else {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*rx_2);
+		else
+			cmd.size = sizeof(*rx);
+		if (in->smmu_enabled) {
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul_smmu.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul_smmu.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul_smmu.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul_smmu.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul_smmu.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul_smmu.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul_smmu.rdy_comp_ring_size;
+		} else {
+			IPADBG("rx_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_base_pa);
+			IPADBG("rx_ring_size=%d\n",
+				in->u.ul.rdy_ring_size);
+			IPADBG("rx_ring_rp_pa=0x%pa\n",
+				&in->u.ul.rdy_ring_rp_pa);
+			IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_base_pa);
+			IPADBG("rx_comp_ring_size=%d\n",
+				in->u.ul.rdy_comp_ring_size);
+			IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+				&in->u.ul.rdy_comp_ring_wp_pa);
+			ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+				in->u.ul.rdy_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+				in->u.ul.rdy_ring_rp_pa;
+			ipa3_ctx->uc_ctx.rdy_ring_size =
+				in->u.ul.rdy_ring_size;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+				in->u.ul.rdy_comp_ring_base_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+				in->u.ul.rdy_comp_ring_wp_pa;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+				in->u.ul.rdy_comp_ring_size;
+		}
+	}
+
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		result = -ENOMEM;
+		goto dma_alloc_fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2) {
+			tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.comp_ring_size,
+				in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
+					len,
+					false,
+					&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->comp_ring_size = len;
+			IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->comp_ring_base_pa_hi,
+					tx_2->comp_ring_base_pa);
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			/* WA: wlan passed ce_ring sg_table PA directly */
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->ce_ring_size = len;
+			IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_base_pa_hi,
+					tx_2->ce_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_doorbell_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_doorbell_pa_hi,
+					tx_2->ce_ring_doorbell_pa);
+
+			tx_2->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
+			tx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.comp_ring_size,
+					in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.comp_ring_base_pa,
+						&in->u.dl_smmu.comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->comp_ring_base_pa = va;
+			tx->comp_ring_size = len;
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d 0x%lx\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size,
+					va);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_base_pa = va;
+			tx->ce_ring_size = len;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+
+			IPADBG("CE doorbell pa: 0x%pa va:0x%lx\n", &pa, va);
+			IPADBG("Is wdi_over_pcie ? (%s)\n",
+				ipa3_ctx->wdi_over_pcie ? "Yes":"No");
+
+			if (ipa3_ctx->wdi_over_pcie)
+				tx->ce_ring_doorbell_pa = pa;
+			else
+				tx->ce_ring_doorbell_pa = va;
+
+			tx->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
+			tx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		if (ipa3_ctx->ipa_wdi2) {
+			rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_ring_size,
+				in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_ring_size = len;
+			IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_ring_base_pa_hi,
+					rx_2->rx_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_rp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+					rx_2->rx_ring_rp_pa_hi,
+					rx_2->rx_ring_rp_pa);
+			len = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_size :
+				in->u.ul.rdy_comp_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_comp_ring_size,
+					in->u.ul.rdy_comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_comp_ring_base_pa,
+						&in->u.ul_smmu.rdy_comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_comp_ring_size = len;
+			IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_base_pa_hi,
+					rx_2->rx_comp_ring_base_pa);
+
+			pa = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_pa :
+				in->u.ul.rdy_comp_ring_wp_pa;
+			if (ipa_create_uc_smmu_mapping(
+						IPA_WDI_RX_COMP_RING_WP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_rng WP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_wp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_wp_pa_hi,
+					rx_2->rx_comp_ring_wp_pa);
+			rx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_ring_size,
+					in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_base_pa = va;
+			rx->rx_ring_size = len;
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_rp_pa = va;
+			rx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	ep->valid = 1;
+	ep->client = in->sys.client;
+	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto uc_timeout;
+	}
+	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CLIENT_IS_CONS(in->sys.client) ?
+				IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+				IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+	ep->client_notify = in->sys.notify;
+	ep->priv = in->sys.priv;
+
+	/* for AP+STA stats update */
+	if (in->wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+	else
+		IPADBG("in->wdi_notify is null\n");
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		in->sys.ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+		in->sys.ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+		in->sys.ipa_ep_cfg.aggr.aggr_pkt_limit = IPA_AGGR_PKT_LIMIT;
+		in->sys.ipa_ep_cfg.aggr.aggr_byte_limit =
+						IPA_AGGR_HARD_BYTE_LIMIT;
+		in->sys.ipa_ep_cfg.aggr.aggr_hard_byte_limit_en =
+						IPA_ENABLE_AGGR;
+	}
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	ipa3_enable_data_path(ipa_ep_idx);
+
+	out->clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	ep->uc_offload_state |= IPA_WDI_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+	return 0;
+
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+uc_timeout:
+	ipa_release_uc_smmu_mappings(in->sys.client);
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+	return result;
+}
+
+int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->gsi_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_reset_gsi_channel(clnt_hdl);
+	ipa3_reset_gsi_event_ring(clnt_hdl);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_release_gsi_channel(clnt_hdl);
+	if (result) {
+		IPAERR("GSI dealloc channel failed %d\n",
+				result);
+		goto fail_dealloc_channel;
+	}
+	ipa_release_ap_smmu_mappings(clnt_hdl);
+
+	/* for AP+STA stats update */
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+	else
+		IPADBG("uc_wdi_ctx.stats_notify already null\n");
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7)
+		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI);
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+fail_dealloc_channel:
+	return result;
+}
+
+/**
+ * ipa3_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t tear;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_disconnect_gsi_wdi_pipe(clnt_hdl);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	tear.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(tear.raw32b,
+				IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+	ipa_release_uc_smmu_mappings(ep->client);
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	/* for AP+STA stats update */
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+	else
+		IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
+uc_timeout:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int ipa_ep_idx;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	if (ep->gsi_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->gsi_offload_state |= IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+	return result;
+}
+
+int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 cons_hdl;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+				clnt_hdl);
+		goto gsi_timeout;
+	}
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off, before
+	 * setting delay on the IPA consumer pipe (client producer),
+	 * remove the delay and enable HOLB on the IPA producer pipe.
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		/* remove delay on wlan-prod pipe*/
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		cons_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		if (cons_hdl == IPA_EP_NOT_ALLOCATED) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_WLAN1_CONS);
+			result = -EPERM;
+			goto gsi_timeout;
+		}
+		if (ipa3_ctx->ep[cons_hdl].valid == 1) {
+			result = ipa3_disable_data_path(cons_hdl);
+			if (result) {
+				IPAERR("disable data path failed\n");
+				IPAERR("res=%d clnt=%d\n",
+						result, cons_hdl);
+				goto gsi_timeout;
+			}
+		}
+		usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+			IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+	ep->gsi_offload_state &= ~IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+gsi_timeout:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+/**
+ * ipa3_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t enable;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_enable_gsi_wdi_pipe(clnt_hdl);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	enable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(enable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	ep->uc_offload_state |= IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+/**
+ * ipa3_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t disable;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 cons_hdl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_disable_gsi_wdi_pipe(clnt_hdl);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			clnt_hdl);
+		result = -EPERM;
+		goto uc_timeout;
+	}
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off, before
+	 * setting delay on the IPA consumer pipe (client producer),
+	 * remove the delay and enable HOLB on the IPA producer pipe.
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		/* remove delay on wlan-prod pipe*/
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		cons_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		cons_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		if (cons_hdl == IPA_EP_NOT_ALLOCATED) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_WLAN1_CONS);
+			result = -EPERM;
+			goto uc_timeout;
+		}
+		if (ipa3_ctx->ep[cons_hdl].valid == 1) {
+			result = ipa3_disable_data_path(cons_hdl);
+			if (result) {
+				IPAERR("disable data path failed\n");
+				IPAERR("res=%d clnt=%d\n",
+					result, cons_hdl);
+				result = -EPERM;
+				goto uc_timeout;
+			}
+		}
+		usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+			IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+
+	}
+
+	disable.params.ipa_pipe_number = clnt_hdl;
+	result = ipa3_uc_send_cmd(disable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+	ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	struct gsi_chan_info chan_info;
+	union __packed gsi_channel_scratch gsi_scratch;
+	struct IpaHwOffloadStatsAllocCmdData_t *pcmd_t = NULL;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (result)
+		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+				clnt_hdl, result);
+	else
+		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed %d\n", result);
+		ipa_assert();
+	}
+	pcmd_t = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI];
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(ep->client)) {
+			pcmd_t->ch_id_info[0].ch_id
+				= ep->gsi_chan_hdl;
+			pcmd_t->ch_id_info[0].dir
+				= DIR_PRODUCER;
+		} else {
+			pcmd_t->ch_id_info[1].ch_id
+				= ep->gsi_chan_hdl;
+			pcmd_t->ch_id_info[1].dir
+				= DIR_CONSUMER;
+		}
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI]);
+	}
+	gsi_query_channel_info(ep->gsi_chan_hdl, &chan_info);
+	gsi_read_channel_scratch(ep->gsi_chan_hdl, &gsi_scratch);
+	IPADBG("ch=%lu channel base = 0x%llx , event base 0x%llx\n",
+				ep->gsi_chan_hdl,
+				ep->gsi_mem_info.chan_ring_base_addr,
+				ep->gsi_mem_info.evt_ring_base_addr);
+	IPADBG("RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
+			chan_info.rp, chan_info.wp, chan_info.evt_valid,
+			chan_info.evt_rp, chan_info.evt_wp);
+	IPADBG("Scratch 0 = %x Scratch 1 = %x Scratch 2 = %x Scratch 3 = %x\n",
+				gsi_scratch.data.word1, gsi_scratch.data.word2,
+				gsi_scratch.data.word3, gsi_scratch.data.word4);
+
+	ep->gsi_offload_state |= IPA_WDI_RESUMED;
+	IPADBG("exit\n");
+	return result;
+}
+
+/**
+ * ipa3_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t resume;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_resume_gsi_wdi_pipe(clnt_hdl);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	resume.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(resume.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+		goto uc_timeout;
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (result)
+		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+				clnt_hdl, result);
+	else
+		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+	ep->uc_offload_state |= IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	int res = 0;
+	u32 source_pipe_bitmask = 0;
+	bool disable_force_clear = false;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+	int retry_cnt = 0;
+	struct gsi_chan_info chan_info;
+	union __packed gsi_channel_scratch gsi_scratch;
+	struct IpaHwOffloadStatsAllocCmdData_t *pcmd_t = NULL;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
+	if (ipa_ep_idx < 0) {
+		IPAERR("IPA client mapping failed\n");
+		return -EPERM;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->gsi_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+				IPA_WDI_RESUMED)) {
+		IPAERR("WDI channel bad state %d\n", ep->gsi_offload_state);
+		return -EFAULT;
+	}
+	if (ep->valid) {
+		IPADBG("suspended pipe %d\n", ipa_ep_idx);
+		source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
+		res = ipa3_enable_force_clear(clnt_hdl,
+				false, source_pipe_bitmask);
+		if (res) {
+			/*
+			 * Assuming modem SSR here; the AP can remove
+			 * the delay in this case.
+			 */
+			IPAERR("failed to force clear %d\n", res);
+			IPAERR("remove delay from SCND reg\n");
+			ep_ctrl_scnd.endp_delay = false;
+			ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl,
+					&ep_ctrl_scnd);
+		} else {
+			disable_force_clear = true;
+		}
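+		/*
+		 * GSI_STATUS_AGAIN means the channel stop could not
+		 * complete yet; retry up to GSI_STOP_MAX_RETRY_CNT times
+		 * before giving up and asserting.
+		 */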
+retry_gsi_stop:
+		res = ipa3_stop_gsi_channel(ipa_ep_idx);
+		if (res != 0 && res != -GSI_STATUS_AGAIN &&
+				res != -GSI_STATUS_TIMED_OUT) {
+			IPAERR("failed to stop channel res = %d\n", res);
+			goto fail_stop_channel;
+		} else if (res == -GSI_STATUS_AGAIN) {
+			IPADBG("GSI stop channel failed retry cnt = %d\n",
+						retry_cnt);
+			retry_cnt++;
+			if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
+				goto fail_stop_channel;
+			goto retry_gsi_stop;
+		} else {
+			IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
+		}
+		gsi_query_channel_info(ep->gsi_chan_hdl, &chan_info);
+		gsi_read_channel_scratch(ep->gsi_chan_hdl, &gsi_scratch);
+		IPADBG("ch=%lu channel base = 0x%llx , event base 0x%llx\n",
+				ep->gsi_chan_hdl,
+				ep->gsi_mem_info.chan_ring_base_addr,
+				ep->gsi_mem_info.evt_ring_base_addr);
+		IPADBG("RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx",
+				chan_info.rp, chan_info.wp,
+				chan_info.evt_valid, chan_info.evt_rp);
+		IPADBG("EWP=0x%llx\n", chan_info.evt_wp);
+		IPADBG("Scratch 0 = %x Scratch 1 = %x Scratch 2 = %x",
+				gsi_scratch.data.word1, gsi_scratch.data.word2,
+				gsi_scratch.data.word3);
+		IPADBG("Scratch 3 = %x\n", gsi_scratch.data.word4);
+	}
+	pcmd_t = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI];
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		if (IPA_CLIENT_IS_PROD(ep->client)) {
+			pcmd_t->ch_id_info[0].ch_id
+				= 0xff;
+			pcmd_t->ch_id_info[0].dir
+				= DIR_PRODUCER;
+		} else {
+			pcmd_t->ch_id_info[1].ch_id
+				= 0xff;
+			pcmd_t->ch_id_info[1].dir
+				= DIR_CONSUMER;
+		}
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI]);
+	}
+	if (disable_force_clear)
+		ipa3_disable_force_clear(clnt_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->gsi_offload_state &= ~IPA_WDI_RESUMED;
+	return res;
+fail_stop_channel:
+	ipa_assert();
+	return res;
+}
+
+/**
+ * ipa3_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t suspend;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 source_pipe_bitmask = 0;
+	bool disable_force_clear = false;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_suspend_gsi_wdi_pipe(clnt_hdl);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+				IPA_WDI_RESUMED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	suspend.params.ipa_pipe_number = clnt_hdl;
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		/*
+		 * For WDI 2.0 we need to ensure the pipe is empty before
+		 * suspend, as the IPA uC will otherwise fail to suspend it.
+		 */
+		if (ipa3_ctx->ipa_wdi2) {
+			source_pipe_bitmask = 1 <<
+					ipa3_get_ep_mapping(ep->client);
+			result = ipa3_enable_force_clear(clnt_hdl,
+				false, source_pipe_bitmask);
+			if (result) {
+			/*
+			 * Assuming modem SSR here; the AP can remove
+			 * the delay in this case.
+			 */
+				IPAERR("failed to force clear %d\n", result);
+				IPAERR("remove delay from SCND reg\n");
+				ep_ctrl_scnd.endp_delay = false;
+				ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl,
+					&ep_ctrl_scnd);
+			} else {
+				disable_force_clear = true;
+			}
+		}
+
+		IPADBG("Post suspend event first for IPA Producer\n");
+		IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			if (result)
+				IPAERR("(ep: %d) failed to suspend result=%d\n",
+						clnt_hdl, result);
+			else
+				IPADBG("(ep: %d) suspended\n", clnt_hdl);
+		}
+	} else {
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to delay result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	if (disable_force_clear)
+		ipa3_disable_force_clear(clnt_hdl);
+
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_broadcast_wdi_quota_reach_ind() - broadcast a quota reach indication
+ * @fid:	[in] input netdev ID
+ * @num_bytes:	[in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+	uint64_t num_bytes)
+{
+	IPAERR("Quota reached indication on fid(%d) bytes(%lu)\n",
+			  fid, (unsigned long)num_bytes);
+	ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+	return 0;
+}
+
+int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union __packed gsi_wdi_channel_scratch3_reg gsi_scratch3;
+	union __packed gsi_wdi2_channel_scratch2_reg gsi_scratch2;
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
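+	/*
+	 * The metadata register byte offset is converted to a 32-bit
+	 * word offset (divided by 4) before being written to channel
+	 * scratch.
+	 */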
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_7) {
+		memset(&gsi_scratch3, 0, sizeof(gsi_scratch3));
+		gsi_scratch3.wdi.qmap_id = qmap_id;
+		gsi_scratch3.wdi.endp_metadatareg_offset =
+			ipahal_get_reg_mn_ofst(
+				IPA_ENDP_INIT_HDR_METADATA_n, 0, clnt_hdl)/4;
+		result = gsi_write_channel_scratch3_reg(ep->gsi_chan_hdl,
+								gsi_scratch3);
+	} else {
+		memset(&gsi_scratch2, 0, sizeof(gsi_scratch2));
+		gsi_scratch2.wdi.qmap_id = qmap_id;
+		gsi_scratch2.wdi.endp_metadatareg_offset =
+			ipahal_get_reg_mn_ofst(
+				IPA_ENDP_INIT_HDR_METADATA_n, 0, clnt_hdl)/4;
+		result = gsi_write_channel_scratch2_reg(ep->gsi_chan_hdl,
+								gsi_scratch2);
+	}
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_write_channel_scratch failed %d\n",
+			result);
+		goto fail_write_channel_scratch;
+	}
+
+	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return 0;
+fail_write_channel_scratch:
+	ipa_assert();
+	return result;
+}
+
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR_RL("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+	if (ipa3_ctx->ipa_wdi2_over_gsi)
+		return ipa3_write_qmapid_gsi_wdi_pipe(clnt_hdl, qmap_id);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+		IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	qmap.params.ipa_pipe_number = clnt_hdl;
+	qmap.params.qmap_id = qmap_id;
+
+	result = ipa3_uc_send_cmd(qmap.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+/**
+ * ipa3_uc_reg_rdyCB() - register a uC ready CB, to be invoked
+ * later if the uC is not ready yet
+ * @inout:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int result = 0;
+
+	if (inout == NULL) {
+		IPAERR("bad parm. inout=%pK ", inout);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+		ipa3_ctx->uc_wdi_ctx.priv = inout->priv;
+	} else {
+		inout->is_uC_ready = true;
+	}
+
+	return 0;
+}
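+
+/*
+ * Typical usage (an illustrative sketch; my_uc_ready_cb and my_priv
+ * are hypothetical client names):
+ *
+ *	struct ipa_wdi_uc_ready_params params = {
+ *		.notify = my_uc_ready_cb,
+ *		.priv = my_priv,
+ *	};
+ *	ipa3_uc_reg_rdyCB(&params);
+ *	if (params.is_uC_ready)
+ *		... proceed with WDI setup immediately ...
+ *	else
+ *		... wait for my_uc_ready_cb() to fire; see
+ *		ipa3_uc_wdi_loaded_handler() below ...
+ */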
+
+/**
+ * ipa3_uc_dereg_rdyCB() - de-register the uC ready CB
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_dereg_rdyCB(void)
+{
+	ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+	ipa3_ctx->uc_wdi_ctx.priv = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_wdi_get_dbpa() - retrieve the doorbell physical address
+ * of the WLAN pipes
+ * @param:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%pK ", param);
+		if (param)
+			IPAERR("client = %d\n", param->client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(param->client)) {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	return 0;
+}
+
+static void ipa3_uc_wdi_loaded_handler(void)
+{
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
+			ipa3_ctx->uc_wdi_ctx.priv);
+
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_wdi_ctx.priv = NULL;
+	}
+}
+
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+	int i;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (!info) {
+		IPAERR("info = %pK\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]) {
+		IPAERR("IPA SMMU not enabled\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = ipa3_iommu_map(cb->iommu_domain,
+			rounddown(info[i].iova, PAGE_SIZE),
+			rounddown(info[i].pa, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+			prot);
+	}
+
+	return ret;
+}
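+
+/*
+ * Worked example of the page-alignment math above (assumed values):
+ * with PAGE_SIZE = 0x1000, pa = 0x12345678 and size = 0x100, the
+ * mapping starts at rounddown(pa, PAGE_SIZE) = 0x12345000 and covers
+ * roundup(0x100 + 0x678, 0x1000) = 0x1000 bytes, i.e. the whole
+ * buffer at page granularity.
+ */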
+
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+	int i;
+	int ret = 0;
+
+	if (!info) {
+		IPAERR("info = %pK\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = iommu_unmap(cb->iommu_domain,
+			rounddown(info[i].iova, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+	}
+
+	return ret;
+}

9097 0
ipa/ipa_v3/ipa_utils.c

@@ -0,0 +1,9097 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/interconnect.h>
+#include <linux/msm_gsi.h>
+#include <linux/elf.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
+#include "../ipa_rm_i.h"
+
+/*
+ * The following is for adding code (i.e. for EMULATION) that is not
+ * found on x86.
+ */
+#if defined(CONFIG_IPA_EMULATION)
+# include "ipa_emulation_stubs.h"
+#endif
+
+#define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+
+#define IPA_V3_5_CLK_RATE_SVS2 (100 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_SVS (200 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_NOMINAL (400 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_TURBO (42640 * 10 * 1000UL)
+
+#define IPA_V4_0_CLK_RATE_SVS2 (60 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_SVS (125 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
+
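+/* Maximum HOLB timer value: 2^32 - 1, i.e. U32_MAX */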
+#define IPA_MAX_HOLB_TMR_VAL (4294967296 - 1)
+
+#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+#define IPA_V3_0_BW_THRESHOLD_SVS_MBPS (310)
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
+#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_BCR_REG_VAL_v4_0 (0x00000039)
+#define IPA_BCR_REG_VAL_v4_2 (0x00000000)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+
+#define IPA_FILT_ROUT_HASH_REG_VAL_v4_2 (0x00000000)
+#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
+#define IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC (500)
+
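+/*
+ * The maximum aggregation limits are derived from the register field
+ * layout: shifting the field bitmask down by the field shift yields
+ * the largest value the field can hold.
+ */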
+#define IPA_AGGR_BYTE_LIMIT (\
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+/* In IPAv3, only endpoints 0-3 can be configured for deaggregation */
+#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
+
+#define IPA_TAG_TIMER_TIMESTAMP_SHFT (14) /* ~0.8msec */
+#define IPA_NAT_TIMER_TIMESTAMP_SHFT (24) /* ~0.8sec */
+
+/*
+ * Units of time at a given granularity.
+ * The limit is based on the H/W HOLB/AGGR time-limit field width.
+ */
+#define IPA_TIMER_SCALED_TIME_LIMIT 31
+
+/* HPS and DPS sequencer types */
+
+/* DMA Only */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY  0x00000000
+/* DMA + decipher */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
+/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
+/* Packet Processing + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
+/* Packet Processing + decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
+/* 2 Packet Processing pass + no decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
+/* 2 Packet Processing pass + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
+/* 2 Packet Processing pass + no decipher + uCP + HPS REP DMA Parser. */
+#define IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP 0x00000804
+/* Packet Processing + no decipher + no uCP + HPS REP DMA Parser.*/
+#define IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP 0x00000806
+/* COMP/DECOMP */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
+/* 2 Packet Processing + no decipher + 2 uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_2ND_UCP 0x0000000a
+/* 2 Packet Processing + decipher + 2 uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_2ND_UCP 0x0000001b
+/* 3 Packet Processing + no decipher + 2 uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_NO_DEC_2ND_UCP 0x0000000c
+/* 3 Packet Processing + decipher + 2 uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_DEC_2ND_UCP 0x0000001d
+/* 2 Packet Processing + no decipher + 2 uCP + HPS REP DMA Parser */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_2ND_UCP_DMAP 0x0000080a
+/* 3 Packet Processing + no decipher + 2 uCP + HPS REP DMA Parser */
+#define IPA_DPS_HPS_SEQ_TYPE_3RD_PKT_PROCESS_PASS_NO_DEC_2ND_UCP_DMAP 0x0000080c
+/* Invalid sequencer type */
+#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF
+
+#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
+	(seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
+
+
+/* Resource group indices */
+#define IPA_v3_0_GROUP_UL		(0)
+#define IPA_v3_0_GROUP_DL		(1)
+#define IPA_v3_0_GROUP_DPL		IPA_v3_0_GROUP_DL
+#define IPA_v3_0_GROUP_DIAG		(2)
+#define IPA_v3_0_GROUP_DMA		(3)
+#define IPA_v3_0_GROUP_IMM_CMD		IPA_v3_0_GROUP_UL
+#define IPA_v3_0_GROUP_Q6ZIP		(4)
+#define IPA_v3_0_GROUP_Q6ZIP_GENERAL	IPA_v3_0_GROUP_Q6ZIP
+#define IPA_v3_0_GROUP_UC_RX_Q		(5)
+#define IPA_v3_0_GROUP_Q6ZIP_ENGINE	IPA_v3_0_GROUP_UC_RX_Q
+#define IPA_v3_0_GROUP_MAX		(6)
+
+#define IPA_v3_5_GROUP_LWA_DL		(0) /* currently not used */
+#define IPA_v3_5_MHI_GROUP_PCIE	IPA_v3_5_GROUP_LWA_DL
+#define IPA_v3_5_GROUP_UL_DL		(1)
+#define IPA_v3_5_MHI_GROUP_DDR		IPA_v3_5_GROUP_UL_DL
+#define IPA_v3_5_MHI_GROUP_DMA		(2)
+#define IPA_v3_5_GROUP_UC_RX_Q		(3) /* currently not used */
+#define IPA_v3_5_SRC_GROUP_MAX		(4)
+#define IPA_v3_5_DST_GROUP_MAX		(3)
+
+#define IPA_v4_0_GROUP_LWA_DL		(0)
+#define IPA_v4_0_MHI_GROUP_PCIE		(0)
+#define IPA_v4_0_ETHERNET		(0)
+#define IPA_v4_0_GROUP_UL_DL		(1)
+#define IPA_v4_0_MHI_GROUP_DDR		(1)
+#define IPA_v4_0_MHI_GROUP_DMA		(2)
+#define IPA_v4_0_GROUP_UC_RX_Q		(3)
+#define IPA_v4_0_SRC_GROUP_MAX		(4)
+#define IPA_v4_0_DST_GROUP_MAX		(4)
+
+#define IPA_v4_2_GROUP_UL_DL		(0)
+#define IPA_v4_2_SRC_GROUP_MAX		(1)
+#define IPA_v4_2_DST_GROUP_MAX		(1)
+
+#define IPA_v4_5_MHI_GROUP_PCIE		(0)
+#define IPA_v4_5_GROUP_UL_DL		(1)
+#define IPA_v4_5_MHI_GROUP_DDR		(1)
+#define IPA_v4_5_MHI_GROUP_DMA		(2)
+#define IPA_v4_5_MHI_GROUP_QDSS		(3)
+#define IPA_v4_5_GROUP_UC_RX_Q		(4)
+#define IPA_v4_5_SRC_GROUP_MAX		(5)
+#define IPA_v4_5_DST_GROUP_MAX		(5)
+
+#define IPA_v4_7_GROUP_UL_DL		(0)
+#define IPA_v4_7_SRC_GROUP_MAX		(1)
+#define IPA_v4_7_DST_GROUP_MAX		(1)
+
+#define IPA_v4_9_GROUP_UL_DL		(0)
+#define IPA_v4_9_SRC_GROUP_MAX		(1)
+#define IPA_v4_9_DST_GROUP_MAX		(1)
+
+#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
+
+enum ipa_rsrc_grp_type_src {
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX,
+
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS,
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX,
+
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX
+};
+
+#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
+
+enum ipa_rsrc_grp_type_dst {
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_MAX,
+
+	IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+	IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_5_RSRC_GRP_TYPE_DST_MAX,
+
+	IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+	IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v4_0_RSRC_GRP_TYPE_DST_MAX,
+};
+#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
+
+enum ipa_rsrc_grp_type_rx {
+	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
+	IPA_RSRC_GRP_TYPE_RX_MAX
+};
+
+enum ipa_rsrc_grp_rx_hps_weight_config {
+	IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG,
+	IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX
+};
+
+struct rsrc_min_max {
+	u32 min;
+	u32 max;
+};
+
+enum ipa_ver {
+	IPA_3_0,
+	IPA_3_5,
+	IPA_3_5_MHI,
+	IPA_3_5_1,
+	IPA_4_0,
+	IPA_4_0_MHI,
+	IPA_4_1,
+	IPA_4_1_APQ,
+	IPA_4_2,
+	IPA_4_5,
+	IPA_4_5_MHI,
+	IPA_4_5_APQ,
+	IPA_4_7,
+	IPA_4_9,
+	IPA_VER_MAX,
+};
+
+
+static const struct rsrc_min_max ipa3_rsrc_src_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/* UL	DL	DIAG	DMA	Not Used	uC Rx */
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+	},
+	[IPA_3_5] = {
+		/* LWA_DL  UL_DL    unused  UC_RX_Q, others are invalid */
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{0, 0}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{0, 0}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{0, 0}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{0, 0}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA  unused, other are invalid */
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL  UL_DL    unused  UC_RX_Q, others are invalid */
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0] = {
+		/* LWA_DL  UL_DL    unused  UC_RX_Q, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE  DDR     DMA  unused, other are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_1] = {
+		/* LWA_DL  UL_DL    unused  UC_RX_Q, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 63}, {1, 63}, {0, 0}, {1, 63}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 63}, {0, 63}, {0, 63}, {0, 63},  {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_2] = {
+		/* UL_DL   others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{10, 10}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{1, 1}, {0, 0}, {0, 0},  {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5] = {
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{0, 0}, {18, 18}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 63}, {0, 63}, {0, 63}, {0, 63},  {0, 63}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
+	},
+	[IPA_4_5_MHI] = {
+		/* PCIE  DDR  DMA  QDSS  unused  N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{9, 9}, {12, 12}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{9, 9}, {14, 14}, {4, 4}, {4, 4}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 63}, {0, 63}, {0, 63}, {0, 63},  {0, 63}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5_APQ] = {
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{0, 0}, {18, 18}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 63}, {0, 63}, {0, 63}, {0, 63},  {0, 63}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
+	},
+	[IPA_4_7] = {
+		/* UL_DL   others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{8, 8}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{8, 8}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{18, 18}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{2, 2}, {0, 0}, {0, 0},  {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{15, 15}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_9] = {
+		/* UL_DL   others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 12}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{20, 20}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{38, 38}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 4}, {0, 0}, {0, 0},  {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{30, 30}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+
+};
+
+static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/* UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng */
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
+	},
+	[IPA_3_5] = {
+		/* unused UL/DL/DPL unused N/A    N/A     N/A */
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA     N/A     N/A     N/A */
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL UL/DL/DPL unused N/A   N/A     N/A */
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0] = {
+		/* LWA_DL UL/DL/DPL uC, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/* LWA_DL UL/DL/DPL uC, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_1] = {
+		/* LWA_DL UL/DL/DPL uC, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 63}, {1, 63}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_2] = {
+		/* UL/DL/DPL, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5] = {
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+	},
+	[IPA_4_5_MHI] = {
+		/* PCIE/DPL  DDR  DMA/CV2X  QDSS  uC  N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+	},
+	[IPA_4_5_APQ] = {
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+	},
+	[IPA_4_7] = {
+		/* UL/DL/DPL, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{7, 7}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_9] = {
+		/* UL/DL/DPL, others are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{9, 9}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+
+};
+
+static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/* UL	DL	DIAG	DMA	unused	uC Rx */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
+	},
+	[IPA_3_5] = {
+		/* unused UL_DL	unused UC_RX_Q   N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{0, 0}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE   DDR	 DMA   unused   N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {2, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0] = {
+		/* LWA_DL UL_DL	unused UC_RX_Q, others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE   DDR	  DMA     unused   N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {2, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_1] = {
+		/* LWA_DL UL_DL	unused UC_RX_Q, others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_2] = {
+		/* UL_DL, others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{4, 4}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5] = {
+		/* unused  UL_DL  unused unused  UC_RX_Q  N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{0, 0}, {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5_MHI] = {
+		/* PCIE  DDR  DMA  QDSS  unused  N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {3, 3}, {3, 3}, {3, 3}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_5_APQ] = {
+		/* unused  UL_DL  unused unused  UC_RX_Q  N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{0, 0}, {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_7] = {
+		/* UL_DL, others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_9] = {
+		/* UL_DL, others are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
+	},
+
+};
+
+static const u32 ipa3_rsrc_rx_grp_hps_weight_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/* UL	DL	DIAG	DMA	unused	uC Rx */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 0, 0, 0, 0, 0, 0 },
+	},
+	[IPA_3_5] = {
+		/* unused UL_DL	unused UC_RX_Q   N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_4_0] = {
+		/* LWA_DL UL_DL	unused UC_RX_Q N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+	},
+	[IPA_4_1] = {
+		/* LWA_DL UL_DL	unused UC_RX_Q, other are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+};
+
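+/*
+ * Execution environments (EEs) that can own a pipe: the apps processor
+ * (AP), the modem DSP (Q6) and the IPA microcontroller (uC).
+ */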
+enum ipa_ees {
+	IPA_EE_AP = 0,
+	IPA_EE_Q6 = 1,
+	IPA_EE_UC = 2,
+};
+
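+/*
+ * QMB master instances: qmb_master_sel in the endpoint table below
+ * routes an endpoint's memory accesses through either the DDR or the
+ * PCIe master port.
+ */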
+enum ipa_qmb_instance_type {
+	IPA_QMB_INSTANCE_DDR = 0,
+	IPA_QMB_INSTANCE_PCIE = 1,
+	IPA_QMB_INSTANCE_MAX
+};
+
+#define QMB_MASTER_SELECT_DDR IPA_QMB_INSTANCE_DDR
+#define QMB_MASTER_SELECT_PCIE IPA_QMB_INSTANCE_PCIE
+
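+/**
+ * struct ipa_qmb_outstanding - outstanding-transaction caps per QMB master
+ * @ot_reads: max outstanding read transactions
+ * @ot_writes: max outstanding write transactions
+ * @ot_read_beats: max outstanding read data beats (see TODO below)
+ */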
+struct ipa_qmb_outstanding {
+	u16 ot_reads;
+	u16 ot_writes;
+	u16 ot_read_beats;
+};
+
+/* TODO: Update correct values of ot_read_beats for all targets */
+
+static const struct ipa_qmb_outstanding ipa3_qmb_outstanding
+		[IPA_VER_MAX][IPA_QMB_INSTANCE_MAX] = {
+	[IPA_3_0][IPA_QMB_INSTANCE_DDR]		= {8, 8, 0},
+	[IPA_3_0][IPA_QMB_INSTANCE_PCIE]	= {8, 2, 0},
+	[IPA_3_5][IPA_QMB_INSTANCE_DDR]		= {8, 8, 0},
+	[IPA_3_5][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_3_5_MHI][IPA_QMB_INSTANCE_DDR]	= {8, 8, 0},
+	[IPA_3_5_MHI][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_3_5_1][IPA_QMB_INSTANCE_DDR]	= {8, 8, 0},
+	[IPA_3_5_1][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_4_0][IPA_QMB_INSTANCE_DDR]		= {12, 8, 120},
+	[IPA_4_0][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_4_0_MHI][IPA_QMB_INSTANCE_DDR]	= {12, 8, 0},
+	[IPA_4_0_MHI][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_4_1][IPA_QMB_INSTANCE_DDR]		= {12, 8, 120},
+	[IPA_4_1][IPA_QMB_INSTANCE_PCIE]	= {12, 4, 0},
+	[IPA_4_2][IPA_QMB_INSTANCE_DDR]		= {12, 8, 0},
+	[IPA_4_5][IPA_QMB_INSTANCE_DDR]		= {16, 8, 120},
+	[IPA_4_5][IPA_QMB_INSTANCE_PCIE]	= {12, 8, 0},
+	[IPA_4_5_MHI][IPA_QMB_INSTANCE_DDR]	= {16, 8, 120},
+	[IPA_4_5_MHI][IPA_QMB_INSTANCE_PCIE]	= {12, 8, 0},
+	[IPA_4_5_APQ][IPA_QMB_INSTANCE_DDR]	= {16, 8, 120},
+	[IPA_4_5_APQ][IPA_QMB_INSTANCE_PCIE]	= {12, 8, 0},
+	[IPA_4_7][IPA_QMB_INSTANCE_DDR]	        = {13, 12, 120},
+	[IPA_4_9][IPA_QMB_INSTANCE_DDR]	        = {16, 8, 120},
+};
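+/*
+ * These caps are expected to be programmed into the per-master QSB
+ * max-reads/max-writes configuration during HW init; the exact register
+ * layout is target-specific.
+ */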
+
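+/**
+ * struct ipa_ep_configuration - static per-client endpoint descriptor
+ * @valid: true if the client exists on this HW version
+ * @group_num: resource group the endpoint is accounted against
+ * @support_flt: true if the endpoint supports filtering
+ * @sequencer_type: DPS/HPS sequencer type to program for the endpoint
+ * @qmb_master_sel: QMB master (DDR/PCIE) serving the endpoint
+ * @ipa_gsi_ep_info: pipe and GSI channel parameters for the endpoint
+ */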
+struct ipa_ep_configuration {
+	bool valid;
+	int group_num;
+	bool support_flt;
+	int sequencer_type;
+	u8 qmb_master_sel;
+	struct ipa_gsi_ep_config ipa_gsi_ep_info;
+};
+
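+/*
+ * Minimal usage sketch (hypothetical code, for illustration only). The
+ * table is indexed [hw_version][client]; positional ipa_gsi_ep_info
+ * initializers follow struct ipa_gsi_ep_config field order:
+ * { ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee
+ *   [, prefetch_mode [, prefetch_threshold]] }.
+ *
+ *	const struct ipa_ep_configuration *cfg =
+ *		&ipa3_ep_mapping[hw_ver][client];
+ *
+ *	if (!cfg->valid)
+ *		return -EPERM;
+ *	gsi_ep = &cfg->ipa_gsi_ep_info;
+ */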
+/* Clients not included in the list below are considered invalid. */
+static const struct ipa_ep_configuration ipa3_ep_mapping
+					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
+	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 1, 8, 16, IPA_EE_UC } },
+	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 3, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 11, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 5, 16, 32, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v3_0_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 6, 18, 28, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {
+			true, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 4, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v3_0_GROUP_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD] = {
+			true, IPA_v3_0_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 18, 28, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {
+			true, IPA_v3_0_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 2, 0, 0, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {
+			true, IPA_v3_0_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 3, 0, 0, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			true, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE,
+			{ 12, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			true, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE,
+			{ 13, 10, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_ETHERNET_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{2, 0, 8, 16, IPA_EE_UC} },
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 3, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 3, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 5, 16, 32, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 25, 4, 8, 8, IPA_EE_UC } },
+	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 27, 4, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 29, 14, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v3_0_GROUP_DPL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 2, 8, 12, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 8, 12, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 8, 8, 12, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 23, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 23, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 6, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 5, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {
+			true, IPA_v3_0_GROUP_DIAG, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 30, 7, 4, 4, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS] = {
+			true, IPA_v3_0_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 8, 4, 4, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS] = {
+			true, IPA_v3_0_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 9, 4, 4, IPA_EE_Q6 } },
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			true, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 28, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			true, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 29, 14, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_ETHERNET_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{24, 3, 8, 8, IPA_EE_UC} },
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 27, 4, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 29, 14, 8, 8, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_0][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
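+	/*
+	 * Each per-version block below repeats the client list with that
+	 * target's pipe/GSI-channel assignments; entries initialized to
+	 * { -1, ... } with valid == false flag clients the target lacks.
+	 */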
+	/* IPA_3_5 */
+	[IPA_3_5][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 8, 16, IPA_EE_UC } },
+	[IPA_3_5][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_APPS_LAN_PROD]   = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 23, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_ODU_PROD]            = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_UC } },
+	[IPA_3_5][IPA_CLIENT_Q6_LAN_PROD]         = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_3_5][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+
+	[IPA_3_5][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 3, 8, 8, IPA_EE_UC } },
+	[IPA_3_5][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_WLAN3_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 10, 4, 6, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 12, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 6, 8, 12, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_ODU_EMB_CONS]        = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 8, 12, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_3_5][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_3_5_MHI */
+	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
+			false, IPA_EP_NOT_ALLOCATED, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ -1, -1, -1, -1, -1 } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 23, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_PROD]            = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 4, 10, 30, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_PROD]          = {
+			false, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 3, 8, 8, IPA_EE_UC } },
+	[IPA_3_5_MHI][IPA_CLIENT_USB_CONS]            = {
+			false, IPA_EP_NOT_ALLOCATED, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ -1, -1, -1, -1, -1 } },
+	[IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			false, IPA_EP_NOT_ALLOCATED, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ -1, -1, -1, -1, -1 } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 6, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_CONS]            = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_3_5_1 */
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 1, 8, 16, IPA_EE_UC } },
+	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 23, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD]         = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 4, 12, 30, IPA_EE_Q6 } },
+	[IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD]	    = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	[IPA_3_5_1][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v3_5_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_UC } },
+
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 11, 8, 8, IPA_EE_UC } },
+	[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS]          =  {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 9, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_WLAN3_CONS]          =  {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 10, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 8, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 2, 4, 6, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 6, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 8, 12, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 8, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST1_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 8, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 9, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 10, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_1][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 2, 4, 6, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_3_5_1][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v3_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_0 */
+	[IPA_4_0][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 2, 8, 16, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_LAN_PROD]   = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 24, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 0, 8, 16, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_4_0][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{8, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 3, 6, 9, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_WLAN3_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ODU_EMB_CONS]        = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 1, 17, 17, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ETHERNET_CONS]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 1, 17, 17, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_0][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_0][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_0_MHI */
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 24, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_PROD]            = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_4_0_MHI][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD]      = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_CONS]            = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 17, 1, 17, 17, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 20, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_DPL_CONS]        = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
+	/* Only for test purpose */
+	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+	/* IPA_4_1 */
+	[IPA_4_1][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 2, 8, 16, IPA_EE_UC } },
+	[IPA_4_1][IPA_CLIENT_WLAN2_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_APPS_LAN_PROD]   = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 24, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_ODU_PROD]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_ETHERNET_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 0, 8, 16, IPA_EE_UC } },
+	[IPA_4_1][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_4_1][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_4_1][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_1][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 3, 9, 9, IPA_EE_UC } },
+	[IPA_4_1][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 1, 8, 13, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_WLAN3_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_ODL_DPL_CONS]        = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_ETHERNET_CONS]	  = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 1, 9, 9, IPA_EE_UC } },
+	[IPA_4_1][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_1][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_1][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_1][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 14, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_1][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 14, 9, 9, IPA_EE_AP } },
+
+	/* IPA_4_2 */
+	[IPA_4_2][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 7, 6, 7, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_APPS_LAN_PROD]   = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 6, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_APPS_WAN_PROD] = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 12, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 20, 20, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 0, 8, 12, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 1, 20, 20, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_ETHERNET_PROD] = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 0, 8, 10, IPA_EE_UC, GSI_USE_PREFETCH_BUFS} },
+	/* Only for test purpose */
+	[IPA_4_2][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 5, 8, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 7, 6, 7, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{1, 0, 8, 12, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 0, 8, 10, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+
+	[IPA_4_2][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 8, 6, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 4, 4, 4, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 2, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 3, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 3, 6, 6, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 2, 6, 6, IPA_EE_Q6,  GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 4, 6, 6, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_ETHERNET_CONS] = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 1, 6, 6, IPA_EE_UC, GSI_USE_PREFETCH_BUFS} },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_2][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST1_CONS]           = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 9, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 4, 4, 4, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	[IPA_4_2][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 8, 6, 9, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+	[IPA_4_2][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 3, 6, 6, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY} },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_2][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_2_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP, GSI_USE_PREFETCH_BUFS} },
+
+	/* IPA_4_5 */
+	[IPA_4_5][IPA_CLIENT_WLAN2_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 12, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
+	[IPA_4_5][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_APPS_LAN_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5][IPA_CLIENT_APPS_WAN_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
+	[IPA_4_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_ODU_PROD]            = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 5, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5][IPA_CLIENT_ETHERNET_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
+	/* Only for test purpose */
+	[IPA_4_5][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 5, 8, 16, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 12, 8, 16, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 14, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_5][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_ODL_DPL_CONS]        = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_APPS_WAN_COAL_CONS]       = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_ODU_EMB_CONS]        = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 30, 6, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+	[IPA_4_5][IPA_CLIENT_ETHERNET_CONS]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
+	[IPA_4_5][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_5][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 1, 9, 9, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST1_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 1, 9, 9, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 24, 3, 8, 14, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 17, 9, 9, IPA_EE_AP } },
+	[IPA_4_5][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 27, 18, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_5][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_5_MHI */
+	[IPA_4_5_MHI][IPA_CLIENT_APPS_CMD_PROD]		= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_PROD]		= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_CMD_PROD]		= {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_DL_NLO_DATA_PROD]	= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 8, 8, 16, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MHI_PROD]		= {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 16, 20, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 12, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	/* Only for test purpose */
+	[IPA_4_5_MHI][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_5_MHI][IPA_CLIENT_APPS_LAN_CONS]		= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_LAN_CONS]		= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_CONS]		= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_UL_NLO_DATA_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_QBAP_STATUS_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 29, 9, 9, 9, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]	= {
+			true, IPA_v4_5_MHI_GROUP_DMA,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 27, 18, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MHI_CONS]		= {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 14, 1, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+	[IPA_4_5_MHI][IPA_CLIENT_MHI_DPL_CONS]		= {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_5 APQ */
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 3, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
+	[IPA_4_5_APQ][IPA_CLIENT_WIGIG_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 1, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 4, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 12, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 2, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 7, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 11, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
+	/* Only for test purpose */
+	[IPA_4_5_APQ][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 1, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 3, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 23, 8, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_WIGIG1_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 14, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_WIGIG2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 18, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_WIGIG3_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 5, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_WIGIG4_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 29, 10, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 24, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 16, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_5_APQ][IPA_CLIENT_ODL_DPL_CONS]       = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 19, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 6, 8, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 17, 8, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_5_APQ][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 16, 5, 5, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST1_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 16, 5, 5, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 24, 9, 9, 9, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 23, 8, 8, 13, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_5_APQ][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_7 */
+	[IPA_4_7][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 3, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_7][IPA_CLIENT_USB_PROD]            = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_APPS_LAN_PROD]	  = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 4, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_7][IPA_CLIENT_APPS_WAN_PROD]	  = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 2, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
+	[IPA_4_7][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 5, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 8 } },
+	[IPA_4_7][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_7][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 8 } },
+	[IPA_4_7][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
+	/* Only for test purpose */
+	[IPA_4_7][IPA_CLIENT_TEST_PROD]           = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST1_PROD]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST2_PROD]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 1, 8, 16, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST3_PROD]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 2, 16, 32, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST4_PROD]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 1, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_7][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 9, 8, 13, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_7][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 8, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_ODL_DPL_CONS]        = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 7, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_APPS_WAN_COAL_CONS]       = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 6, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_7][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_7][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_7][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]  = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_7][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_7][IPA_CLIENT_TEST_CONS]           = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 7, 9, 9, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST1_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 7, 9, 9, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST2_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 12, 9, 9, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST3_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 10, 9, 9, IPA_EE_AP } },
+	[IPA_4_7][IPA_CLIENT_TEST4_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 11, 9, 9, IPA_EE_AP } },
+	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
+	[IPA_4_7][IPA_CLIENT_DUMMY_CONS]          = {
+			true, IPA_v4_7_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 31, 8, 8, IPA_EE_AP } },
+
+	/* IPA_4_9 */
+	[IPA_4_9][IPA_CLIENT_USB_PROD]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_APPS_WAN_PROD]	  = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 2, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
+	[IPA_4_9][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 3, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_APPS_LAN_PROD]	  = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 4, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_WIGIG_PROD]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_9][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 6, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_Q6_WAN_PROD]         = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
+
+	[IPA_4_9][IPA_CLIENT_APPS_WAN_COAL_CONS]       = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 11, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_9][IPA_CLIENT_APPS_WAN_CONS]       = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_USB_DPL_CONS]            = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 13, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_ODL_DPL_CONS]        = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_WIGIG1_CONS]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 23, 15, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_WLAN1_CONS]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 24, 16, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 } },
+	[IPA_4_9][IPA_CLIENT_USB_CONS]            = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 25, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_WIGIG2_CONS]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 26, 18, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_WIGIG3_CONS]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 27, 19, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_WIGIG4_CONS]          = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 20, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_APPS_LAN_CONS]       = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 7, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_Q6_LAN_CONS]         = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+	[IPA_4_9][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]  = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
+	[IPA_4_9][IPA_CLIENT_Q6_WAN_CONS]         = {
+			true, IPA_v4_9_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
+};
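+
+/*
+ * Each initializer in the mapping table above is positional (see
+ * struct ipa_ep_configuration, defined earlier in this file, for the
+ * authoritative field list):
+ *	{ valid, resource group, filtering support, DPS/HPS sequencer type,
+ *	  QMB master select, GSI EP info }
+ * The inner braces hold the GSI EP info (assumed to follow
+ * struct ipa_gsi_ep_config):
+ *	{ IPA pipe number, GSI channel number, TLV FIFO size, AOS FIFO size,
+ *	  owning EE, prefetch mode[, prefetch threshold] }
+ * For example, [IPA_4_5][IPA_CLIENT_USB_PROD] above maps pipe 1 to GSI
+ * channel 0 on the AP EE with an 8-slot TLV FIFO, a 16-slot AOS FIFO and
+ * escape-buffer-only prefetch.
+ */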
+
+static struct ipa3_mem_partition ipa_4_1_mem_part = {
+	.ofst_start				= 0x280,
+	.v4_flt_hash_ofst		= 0x288,
+	.v4_flt_hash_size		= 0x78,
+	.v4_flt_hash_size_ddr		= 0x4000,
+	.v4_flt_nhash_ofst		= 0x308,
+	.v4_flt_nhash_size		= 0x78,
+	.v4_flt_nhash_size_ddr		= 0x4000,
+	.v6_flt_hash_ofst		= 0x388,
+	.v6_flt_hash_size		= 0x78,
+	.v6_flt_hash_size_ddr		= 0x4000,
+	.v6_flt_nhash_ofst		= 0x408,
+	.v6_flt_nhash_size		= 0x78,
+	.v6_flt_nhash_size_ddr		= 0x4000,
+	.v4_rt_num_index		= 0xf,
+	.v4_modem_rt_index_lo		= 0x0,
+	.v4_modem_rt_index_hi		= 0x7,
+	.v4_apps_rt_index_lo		= 0x8,
+	.v4_apps_rt_index_hi		= 0xe,
+	.v4_rt_hash_ofst		= 0x488,
+	.v4_rt_hash_size		= 0x78,
+	.v4_rt_hash_size_ddr		= 0x4000,
+	.v4_rt_nhash_ofst		= 0x508,
+	.v4_rt_nhash_size		= 0x78,
+	.v4_rt_nhash_size_ddr		= 0x4000,
+	.v6_rt_num_index		= 0xf,
+	.v6_modem_rt_index_lo		= 0x0,
+	.v6_modem_rt_index_hi		= 0x7,
+	.v6_apps_rt_index_lo		= 0x8,
+	.v6_apps_rt_index_hi		= 0xe,
+	.v6_rt_hash_ofst		= 0x588,
+	.v6_rt_hash_size		= 0x78,
+	.v6_rt_hash_size_ddr		= 0x4000,
+	.v6_rt_nhash_ofst		= 0x608,
+	.v6_rt_nhash_size		= 0x78,
+	.v6_rt_nhash_size_ddr		= 0x4000,
+	.modem_hdr_ofst			= 0x688,
+	.modem_hdr_size			= 0x140,
+	.apps_hdr_ofst			= 0x7c8,
+	.apps_hdr_size			= 0x0,
+	.apps_hdr_size_ddr		= 0x800,
+	.modem_hdr_proc_ctx_ofst	= 0x7d0,
+	.modem_hdr_proc_ctx_size	= 0x200,
+	.apps_hdr_proc_ctx_ofst		= 0x9d0,
+	.apps_hdr_proc_ctx_size		= 0x200,
+	.apps_hdr_proc_ctx_size_ddr	= 0x0,
+	.modem_comp_decomp_ofst		= 0x0,
+	.modem_comp_decomp_size		= 0x0,
+	.modem_ofst			= 0x13f0,
+	.modem_size			= 0x100c,
+	.apps_v4_flt_hash_ofst		= 0x23fc,
+	.apps_v4_flt_hash_size		= 0x0,
+	.apps_v4_flt_nhash_ofst		= 0x23fc,
+	.apps_v4_flt_nhash_size		= 0x0,
+	.apps_v6_flt_hash_ofst		= 0x23fc,
+	.apps_v6_flt_hash_size		= 0x0,
+	.apps_v6_flt_nhash_ofst		= 0x23fc,
+	.apps_v6_flt_nhash_size		= 0x0,
+	.uc_info_ofst			= 0x80,
+	.uc_info_size			= 0x200,
+	.end_ofst			= 0x2800,
+	.apps_v4_rt_hash_ofst		= 0x23fc,
+	.apps_v4_rt_hash_size		= 0x0,
+	.apps_v4_rt_nhash_ofst		= 0x23fc,
+	.apps_v4_rt_nhash_size		= 0x0,
+	.apps_v6_rt_hash_ofst		= 0x23fc,
+	.apps_v6_rt_hash_size		= 0x0,
+	.apps_v6_rt_nhash_ofst		= 0x23fc,
+	.apps_v6_rt_nhash_size		= 0x0,
+	.uc_descriptor_ram_ofst		= 0x2400,
+	.uc_descriptor_ram_size		= 0x400,
+	.pdn_config_ofst		= 0xbd8,
+	.pdn_config_size		= 0x50,
+	.stats_quota_ofst		= 0xc30,
+	.stats_quota_size		= 0x60,
+	.stats_tethering_ofst		= 0xc90,
+	.stats_tethering_size		= 0x140,
+	.stats_flt_v4_ofst		= 0xdd0,
+	.stats_flt_v4_size		= 0x180,
+	.stats_flt_v6_ofst		= 0xf50,
+	.stats_flt_v6_size		= 0x180,
+	.stats_rt_v4_ofst		= 0x10d0,
+	.stats_rt_v4_size		= 0x180,
+	.stats_rt_v6_ofst		= 0x1250,
+	.stats_rt_v6_size		= 0x180,
+	.stats_drop_ofst		= 0x13d0,
+	.stats_drop_size		= 0x20,
+};
+
+static struct ipa3_mem_partition ipa_4_2_mem_part = {
+	.ofst_start				= 0x280,
+	.v4_flt_hash_ofst		= 0x288,
+	.v4_flt_hash_size		= 0x0,
+	.v4_flt_hash_size_ddr		= 0x0,
+	.v4_flt_nhash_ofst		= 0x290,
+	.v4_flt_nhash_size		= 0x78,
+	.v4_flt_nhash_size_ddr		= 0x4000,
+	.v6_flt_hash_ofst		= 0x310,
+	.v6_flt_hash_size		= 0x0,
+	.v6_flt_hash_size_ddr		= 0x0,
+	.v6_flt_nhash_ofst		= 0x318,
+	.v6_flt_nhash_size		= 0x78,
+	.v6_flt_nhash_size_ddr		= 0x4000,
+	.v4_rt_num_index		= 0xf,
+	.v4_modem_rt_index_lo		= 0x0,
+	.v4_modem_rt_index_hi		= 0x7,
+	.v4_apps_rt_index_lo		= 0x8,
+	.v4_apps_rt_index_hi		= 0xe,
+	.v4_rt_hash_ofst		= 0x398,
+	.v4_rt_hash_size		= 0x0,
+	.v4_rt_hash_size_ddr		= 0x0,
+	.v4_rt_nhash_ofst		= 0x3a0,
+	.v4_rt_nhash_size		= 0x78,
+	.v4_rt_nhash_size_ddr		= 0x4000,
+	.v6_rt_num_index		= 0xf,
+	.v6_modem_rt_index_lo		= 0x0,
+	.v6_modem_rt_index_hi		= 0x7,
+	.v6_apps_rt_index_lo		= 0x8,
+	.v6_apps_rt_index_hi		= 0xe,
+	.v6_rt_hash_ofst		= 0x420,
+	.v6_rt_hash_size		= 0x0,
+	.v6_rt_hash_size_ddr		= 0x0,
+	.v6_rt_nhash_ofst		= 0x428,
+	.v6_rt_nhash_size		= 0x78,
+	.v6_rt_nhash_size_ddr		= 0x4000,
+	.modem_hdr_ofst			= 0x4a8,
+	.modem_hdr_size			= 0x140,
+	.apps_hdr_ofst			= 0x5e8,
+	.apps_hdr_size			= 0x0,
+	.apps_hdr_size_ddr		= 0x800,
+	.modem_hdr_proc_ctx_ofst	= 0x5f0,
+	.modem_hdr_proc_ctx_size	= 0x200,
+	.apps_hdr_proc_ctx_ofst		= 0x7f0,
+	.apps_hdr_proc_ctx_size		= 0x200,
+	.apps_hdr_proc_ctx_size_ddr	= 0x0,
+	.modem_comp_decomp_ofst		= 0x0,
+	.modem_comp_decomp_size		= 0x0,
+	.modem_ofst			= 0xbf0,
+	.modem_size			= 0x140c,
+	.apps_v4_flt_hash_ofst		= 0x1bfc,
+	.apps_v4_flt_hash_size		= 0x0,
+	.apps_v4_flt_nhash_ofst		= 0x1bfc,
+	.apps_v4_flt_nhash_size		= 0x0,
+	.apps_v6_flt_hash_ofst		= 0x1bfc,
+	.apps_v6_flt_hash_size		= 0x0,
+	.apps_v6_flt_nhash_ofst		= 0x1bfc,
+	.apps_v6_flt_nhash_size		= 0x0,
+	.uc_info_ofst			= 0x80,
+	.uc_info_size			= 0x200,
+	.end_ofst			= 0x2000,
+	.apps_v4_rt_hash_ofst		= 0x1bfc,
+	.apps_v4_rt_hash_size		= 0x0,
+	.apps_v4_rt_nhash_ofst		= 0x1bfc,
+	.apps_v4_rt_nhash_size		= 0x0,
+	.apps_v6_rt_hash_ofst		= 0x1bfc,
+	.apps_v6_rt_hash_size		= 0x0,
+	.apps_v6_rt_nhash_ofst		= 0x1bfc,
+	.apps_v6_rt_nhash_size		= 0x0,
+	.uc_descriptor_ram_ofst		= 0x2000,
+	.uc_descriptor_ram_size		= 0x0,
+	.pdn_config_ofst		= 0x9f8,
+	.pdn_config_size		= 0x50,
+	.stats_quota_ofst		= 0xa50,
+	.stats_quota_size		= 0x60,
+	.stats_tethering_ofst		= 0xab0,
+	.stats_tethering_size		= 0x140,
+	.stats_flt_v4_ofst		= 0xbf0,
+	.stats_flt_v4_size		= 0x0,
+	.stats_flt_v6_ofst		= 0xbf0,
+	.stats_flt_v6_size		= 0x0,
+	.stats_rt_v4_ofst		= 0xbf0,
+	.stats_rt_v4_size		= 0x0,
+	.stats_rt_v6_ofst		= 0xbf0,
+	.stats_rt_v6_size		= 0x0,
+	.stats_drop_ofst		= 0xbf0,
+	.stats_drop_size		= 0x0,
+};
+
+static struct ipa3_mem_partition ipa_4_5_mem_part = {
+	.uc_info_ofst			= 0x80,
+	.uc_info_size			= 0x200,
+	.ofst_start			= 0x280,
+	.v4_flt_hash_ofst		= 0x288,
+	.v4_flt_hash_size		= 0x78,
+	.v4_flt_hash_size_ddr		= 0x4000,
+	.v4_flt_nhash_ofst		= 0x308,
+	.v4_flt_nhash_size		= 0x78,
+	.v4_flt_nhash_size_ddr		= 0x4000,
+	.v6_flt_hash_ofst		= 0x388,
+	.v6_flt_hash_size		= 0x78,
+	.v6_flt_hash_size_ddr		= 0x4000,
+	.v6_flt_nhash_ofst		= 0x408,
+	.v6_flt_nhash_size		= 0x78,
+	.v6_flt_nhash_size_ddr		= 0x4000,
+	.v4_rt_num_index		= 0xf,
+	.v4_modem_rt_index_lo		= 0x0,
+	.v4_modem_rt_index_hi		= 0x7,
+	.v4_apps_rt_index_lo		= 0x8,
+	.v4_apps_rt_index_hi		= 0xe,
+	.v4_rt_hash_ofst		= 0x488,
+	.v4_rt_hash_size		= 0x78,
+	.v4_rt_hash_size_ddr		= 0x4000,
+	.v4_rt_nhash_ofst		= 0x508,
+	.v4_rt_nhash_size		= 0x78,
+	.v4_rt_nhash_size_ddr		= 0x4000,
+	.v6_rt_num_index		= 0xf,
+	.v6_modem_rt_index_lo		= 0x0,
+	.v6_modem_rt_index_hi		= 0x7,
+	.v6_apps_rt_index_lo		= 0x8,
+	.v6_apps_rt_index_hi		= 0xe,
+	.v6_rt_hash_ofst		= 0x588,
+	.v6_rt_hash_size		= 0x78,
+	.v6_rt_hash_size_ddr		= 0x4000,
+	.v6_rt_nhash_ofst		= 0x608,
+	.v6_rt_nhash_size		= 0x78,
+	.v6_rt_nhash_size_ddr		= 0x4000,
+	.modem_hdr_ofst			= 0x688,
+	.modem_hdr_size			= 0x240,
+	.apps_hdr_ofst			= 0x8c8,
+	.apps_hdr_size			= 0x200,
+	.apps_hdr_size_ddr		= 0x800,
+	.modem_hdr_proc_ctx_ofst	= 0xad0,
+	.modem_hdr_proc_ctx_size	= 0xb20,
+	.apps_hdr_proc_ctx_ofst		= 0x15f0,
+	.apps_hdr_proc_ctx_size		= 0x200,
+	.apps_hdr_proc_ctx_size_ddr	= 0x0,
+	.nat_tbl_ofst			= 0x1800,
+	.nat_tbl_size			= 0xd00,
+	.stats_quota_ofst		= 0x2510,
+	.stats_quota_size		= 0x78,
+	.stats_tethering_ofst		= 0x2588,
+	.stats_tethering_size		= 0x238,
+	.stats_flt_v4_ofst		= 0,
+	.stats_flt_v4_size		= 0,
+	.stats_flt_v6_ofst		= 0,
+	.stats_flt_v6_size		= 0,
+	.stats_rt_v4_ofst		= 0,
+	.stats_rt_v4_size		= 0,
+	.stats_rt_v6_ofst		= 0,
+	.stats_rt_v6_size		= 0,
+	.stats_fnr_ofst			= 0x27c0,
+	.stats_fnr_size			= 0x800,
+	.stats_drop_ofst		= 0x2fc0,
+	.stats_drop_size		= 0x20,
+	.modem_comp_decomp_ofst		= 0x0,
+	.modem_comp_decomp_size		= 0x0,
+	.modem_ofst			= 0x2fe8,
+	.modem_size			= 0x800,
+	.apps_v4_flt_hash_ofst	= 0x2718,
+	.apps_v4_flt_hash_size	= 0x0,
+	.apps_v4_flt_nhash_ofst	= 0x2718,
+	.apps_v4_flt_nhash_size	= 0x0,
+	.apps_v6_flt_hash_ofst	= 0x2718,
+	.apps_v6_flt_hash_size	= 0x0,
+	.apps_v6_flt_nhash_ofst	= 0x2718,
+	.apps_v6_flt_nhash_size	= 0x0,
+	.apps_v4_rt_hash_ofst	= 0x2718,
+	.apps_v4_rt_hash_size	= 0x0,
+	.apps_v4_rt_nhash_ofst	= 0x2718,
+	.apps_v4_rt_nhash_size	= 0x0,
+	.apps_v6_rt_hash_ofst	= 0x2718,
+	.apps_v6_rt_hash_size	= 0x0,
+	.apps_v6_rt_nhash_ofst	= 0x2718,
+	.apps_v6_rt_nhash_size	= 0x0,
+	.uc_descriptor_ram_ofst	= 0x3800,
+	.uc_descriptor_ram_size	= 0x1000,
+	.pdn_config_ofst	= 0x4800,
+	.pdn_config_size	= 0x50,
+	.end_ofst		= 0x4850,
+};
+
+static struct ipa3_mem_partition ipa_4_7_mem_part = {
+	.uc_info_ofst			= 0x80,
+	.uc_info_size			= 0x200,
+	.ofst_start			= 0x280,
+	.v4_flt_hash_ofst		= 0x288,
+	.v4_flt_hash_size		= 0x78,
+	.v4_flt_hash_size_ddr		= 0x4000,
+	.v4_flt_nhash_ofst		= 0x308,
+	.v4_flt_nhash_size		= 0x78,
+	.v4_flt_nhash_size_ddr		= 0x4000,
+	.v6_flt_hash_ofst		= 0x388,
+	.v6_flt_hash_size		= 0x78,
+	.v6_flt_hash_size_ddr		= 0x4000,
+	.v6_flt_nhash_ofst		= 0x408,
+	.v6_flt_nhash_size		= 0x78,
+	.v6_flt_nhash_size_ddr		= 0x4000,
+	.v4_rt_num_index		= 0xf,
+	.v4_modem_rt_index_lo		= 0x0,
+	.v4_modem_rt_index_hi		= 0x7,
+	.v4_apps_rt_index_lo		= 0x8,
+	.v4_apps_rt_index_hi		= 0xe,
+	.v4_rt_hash_ofst		= 0x488,
+	.v4_rt_hash_size		= 0x78,
+	.v4_rt_hash_size_ddr		= 0x4000,
+	.v4_rt_nhash_ofst		= 0x508,
+	.v4_rt_nhash_size		= 0x78,
+	.v4_rt_nhash_size_ddr		= 0x4000,
+	.v6_rt_num_index		= 0xf,
+	.v6_modem_rt_index_lo		= 0x0,
+	.v6_modem_rt_index_hi		= 0x7,
+	.v6_apps_rt_index_lo		= 0x8,
+	.v6_apps_rt_index_hi		= 0xe,
+	.v6_rt_hash_ofst		= 0x588,
+	.v6_rt_hash_size		= 0x78,
+	.v6_rt_hash_size_ddr		= 0x4000,
+	.v6_rt_nhash_ofst		= 0x608,
+	.v6_rt_nhash_size		= 0x78,
+	.v6_rt_nhash_size_ddr		= 0x4000,
+	.modem_hdr_ofst			= 0x688,
+	.modem_hdr_size			= 0x240,
+	.apps_hdr_ofst			= 0x8c8,
+	.apps_hdr_size			= 0x200,
+	.apps_hdr_size_ddr		= 0x800,
+	.modem_hdr_proc_ctx_ofst	= 0xad0,
+	.modem_hdr_proc_ctx_size	= 0x200,
+	.apps_hdr_proc_ctx_ofst		= 0xcd0,
+	.apps_hdr_proc_ctx_size		= 0x200,
+	.apps_hdr_proc_ctx_size_ddr	= 0x0,
+	.nat_tbl_ofst			= 0xee0,
+	.nat_tbl_size			= 0xd00,
+	.pdn_config_ofst		= 0x1be8,
+	.pdn_config_size		= 0x50,
+	.stats_quota_ofst		= 0x1c40,
+	.stats_quota_size		= 0x78,
+	.stats_tethering_ofst		= 0x1cb8,
+	.stats_tethering_size		= 0x238,
+	.stats_flt_v4_ofst		= 0,
+	.stats_flt_v4_size		= 0,
+	.stats_flt_v6_ofst		= 0,
+	.stats_flt_v6_size		= 0,
+	.stats_rt_v4_ofst		= 0,
+	.stats_rt_v4_size		= 0,
+	.stats_rt_v6_ofst		= 0,
+	.stats_rt_v6_size		= 0,
+	.stats_fnr_ofst			= 0x1ef0,
+	.stats_fnr_size			= 0x0,
+	.stats_drop_ofst		= 0x1ef0,
+	.stats_drop_size		= 0x20,
+	.modem_comp_decomp_ofst		= 0x0,
+	.modem_comp_decomp_size		= 0x0,
+	.modem_ofst			= 0x1f18,
+	.modem_size			= 0x100c,
+	.apps_v4_flt_hash_ofst	= 0x1f18,
+	.apps_v4_flt_hash_size	= 0x0,
+	.apps_v4_flt_nhash_ofst	= 0x1f18,
+	.apps_v4_flt_nhash_size	= 0x0,
+	.apps_v6_flt_hash_ofst	= 0x1f18,
+	.apps_v6_flt_hash_size	= 0x0,
+	.apps_v6_flt_nhash_ofst	= 0x1f18,
+	.apps_v6_flt_nhash_size	= 0x0,
+	.apps_v4_rt_hash_ofst	= 0x1f18,
+	.apps_v4_rt_hash_size	= 0x0,
+	.apps_v4_rt_nhash_ofst	= 0x1f18,
+	.apps_v4_rt_nhash_size	= 0x0,
+	.apps_v6_rt_hash_ofst	= 0x1f18,
+	.apps_v6_rt_hash_size	= 0x0,
+	.apps_v6_rt_nhash_ofst	= 0x1f18,
+	.apps_v6_rt_nhash_size	= 0x0,
+	.uc_descriptor_ram_ofst	= 0x3000,
+	.uc_descriptor_ram_size	= 0x0000,
+	.end_ofst		= 0x3000,
+};
+
+static struct ipa3_mem_partition ipa_4_9_mem_part = {
+	.uc_info_ofst			= 0x80,
+	.uc_info_size			= 0x200,
+	.ofst_start			= 0x280,
+	.v4_flt_hash_ofst		= 0x288,
+	.v4_flt_hash_size		= 0x78,
+	.v4_flt_hash_size_ddr		= 0x4000,
+	.v4_flt_nhash_ofst		= 0x308,
+	.v4_flt_nhash_size		= 0x78,
+	.v4_flt_nhash_size_ddr		= 0x4000,
+	.v6_flt_hash_ofst		= 0x388,
+	.v6_flt_hash_size		= 0x78,
+	.v6_flt_hash_size_ddr		= 0x4000,
+	.v6_flt_nhash_ofst		= 0x408,
+	.v6_flt_nhash_size		= 0x78,
+	.v6_flt_nhash_size_ddr		= 0x4000,
+	.v4_rt_num_index		= 0xf,
+	.v4_modem_rt_index_lo		= 0x0,
+	.v4_modem_rt_index_hi		= 0x7,
+	.v4_apps_rt_index_lo		= 0x8,
+	.v4_apps_rt_index_hi		= 0xe,
+	.v4_rt_hash_ofst		= 0x488,
+	.v4_rt_hash_size		= 0x78,
+	.v4_rt_hash_size_ddr		= 0x4000,
+	.v4_rt_nhash_ofst		= 0x508,
+	.v4_rt_nhash_size		= 0x78,
+	.v4_rt_nhash_size_ddr		= 0x4000,
+	.v6_rt_num_index		= 0xf,
+	.v6_modem_rt_index_lo		= 0x0,
+	.v6_modem_rt_index_hi		= 0x7,
+	.v6_apps_rt_index_lo		= 0x8,
+	.v6_apps_rt_index_hi		= 0xe,
+	.v6_rt_hash_ofst		= 0x588,
+	.v6_rt_hash_size		= 0x78,
+	.v6_rt_hash_size_ddr		= 0x4000,
+	.v6_rt_nhash_ofst		= 0x608,
+	.v6_rt_nhash_size		= 0x78,
+	.v6_rt_nhash_size_ddr		= 0x4000,
+	.modem_hdr_ofst			= 0x688,
+	.modem_hdr_size			= 0x240,
+	.apps_hdr_ofst			= 0x8c8,
+	.apps_hdr_size			= 0x200,
+	.apps_hdr_size_ddr		= 0x800,
+	.modem_hdr_proc_ctx_ofst	= 0xad0,
+	.modem_hdr_proc_ctx_size	= 0xb20,
+	.apps_hdr_proc_ctx_ofst		= 0x15f0,
+	.apps_hdr_proc_ctx_size		= 0x200,
+	.apps_hdr_proc_ctx_size_ddr	= 0x0,
+	.nat_tbl_ofst			= 0x1800,
+	.nat_tbl_size			= 0xd00,
+	.stats_quota_ofst		= 0x2510,
+	.stats_quota_size		= 0x78,
+	.stats_tethering_ofst		= 0x2588,
+	.stats_tethering_size		= 0x238,
+	.stats_flt_v4_ofst		= 0,
+	.stats_flt_v4_size		= 0,
+	.stats_flt_v6_ofst		= 0,
+	.stats_flt_v6_size		= 0,
+	.stats_rt_v4_ofst		= 0,
+	.stats_rt_v4_size		= 0,
+	.stats_rt_v6_ofst		= 0,
+	.stats_rt_v6_size		= 0,
+	.stats_fnr_ofst			= 0x27c0,
+	.stats_fnr_size			= 0x800,
+	.stats_drop_ofst		= 0x2fc0,
+	.stats_drop_size		= 0x20,
+	.modem_comp_decomp_ofst		= 0x0,
+	.modem_comp_decomp_size		= 0x0,
+	.modem_ofst			= 0x2fe8,
+	.modem_size			= 0x800,
+	.apps_v4_flt_hash_ofst	= 0x2718,
+	.apps_v4_flt_hash_size	= 0x0,
+	.apps_v4_flt_nhash_ofst	= 0x2718,
+	.apps_v4_flt_nhash_size	= 0x0,
+	.apps_v6_flt_hash_ofst	= 0x2718,
+	.apps_v6_flt_hash_size	= 0x0,
+	.apps_v6_flt_nhash_ofst	= 0x2718,
+	.apps_v6_flt_nhash_size	= 0x0,
+	.apps_v4_rt_hash_ofst	= 0x2718,
+	.apps_v4_rt_hash_size	= 0x0,
+	.apps_v4_rt_nhash_ofst	= 0x2718,
+	.apps_v4_rt_nhash_size	= 0x0,
+	.apps_v6_rt_hash_ofst	= 0x2718,
+	.apps_v6_rt_hash_size	= 0x0,
+	.apps_v6_rt_nhash_ofst	= 0x2718,
+	.apps_v6_rt_nhash_size	= 0x0,
+	.uc_descriptor_ram_ofst	= 0x3800,
+	.uc_descriptor_ram_size	= 0x1000,
+	.pdn_config_ofst	= 0x4800,
+	.pdn_config_size	= 0x50,
+	.end_ofst		= 0x4850,
+};
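+
+/*
+ * All offsets in the *_mem_part tables above are byte offsets into IPA
+ * shared memory (SRAM) and must stay below end_ofst, which also serves as
+ * the required SRAM size (see smem_reqd_sz below). A minimal access
+ * sketch, assuming IPA_MEM_PART() resolves through the active partition
+ * (roughly ipa3_ctx->ctrl->mem_partition->field):
+ *
+ *	u32 ofst = ipa3_ctx->smem_restricted_bytes +
+ *		IPA_MEM_PART(modem_hdr_ofst);
+ */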
+
+/**
+ * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ *         caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_get_clients_from_rm_resource(
+	enum ipa_rm_resource_name resource,
+	struct ipa3_client_names *clients)
+{
+	int i = 0;
+
+	if (resource < 0 ||
+	    resource >= IPA_RM_RESOURCE_MAX ||
+	    !clients) {
+		IPAERR("Bad parameters\n");
+		return -EINVAL;
+	}
+
+	switch (resource) {
+	case IPA_RM_RESOURCE_USB_CONS:
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_DPL_CONS:
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+		break;
+	case IPA_RM_RESOURCE_HSIC_CONS:
+		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+		break;
+	case IPA_RM_RESOURCE_WLAN_CONS:
+		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+		break;
+	case IPA_RM_RESOURCE_MHI_CONS:
+		clients->names[i++] = IPA_CLIENT_MHI_CONS;
+		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
+		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
+		break;
+	case IPA_RM_RESOURCE_ETHERNET_CONS:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_PROD:
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_PROD;
+		break;
+	case IPA_RM_RESOURCE_HSIC_PROD:
+		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+		break;
+	case IPA_RM_RESOURCE_MHI_PROD:
+		clients->names[i++] = IPA_CLIENT_MHI_PROD;
+		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+		clients->names[i++] = IPA_CLIENT_ODU_PROD;
+		break;
+	case IPA_RM_RESOURCE_ETHERNET_PROD:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
+		break;
+	default:
+		break;
+	}
+	clients->length = i;
+
+	return 0;
+}
+
+/**
+ * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	/*
+	 * Starting with IPA 4.0, pipes can no longer be suspended. Instead,
+	 * the corresponding GSI channel should be stopped. Usually the client
+	 * driver takes care of stopping the channel. For client drivers that
+	 * do not stop the channel, IPA RM will do so based on
+	 * ipa3_should_pipe_channel_be_stopped().
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		return false;
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_USB_CONS     ||
+	    client == IPA_CLIENT_USB_DPL_CONS ||
+	    client == IPA_CLIENT_MHI_CONS     ||
+	    client == IPA_CLIENT_MHI_DPL_CONS ||
+	    client == IPA_CLIENT_HSIC1_CONS   ||
+	    client == IPA_CLIENT_WLAN1_CONS   ||
+	    client == IPA_CLIENT_WLAN2_CONS   ||
+	    client == IPA_CLIENT_WLAN3_CONS   ||
+	    client == IPA_CLIENT_WLAN4_CONS   ||
+	    client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS ||
+	    client == IPA_CLIENT_ETHERNET_CONS)
+		return true;
+
+	return false;
+}
+
+/**
+ * ipa3_should_pipe_channel_be_stopped() - returns true when the client's
+ * channel should be stopped during a power save scenario. False otherwise.
+ * Most clients already stop the GSI channel on suspend and are therefore
+ * not included in the list below.
+ *
+ * @client: [IN] IPA client
+ */
+static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+		return false;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS)
+		return true;
+
+	return false;
+}
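+
+/*
+ * Taken together, the two helpers above encode the power-save policy:
+ * on IPA < 4.0 the pipe itself is suspended via ipa3_cfg_ep_ctrl(),
+ * while on IPA >= 4.0 pipe suspend is unsupported and the GSI channel is
+ * stopped instead (here only for the ODU clients, whose drivers do not
+ * stop it themselves). Both paths are driven by the suspend/resume
+ * helpers below.
+ */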
+
+/**
+ * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+	bool pipe_suspended = false;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("Bad params.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				pipe_suspended = true;
+			}
+		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* Stop GSI channel */
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
+	}
+	/* Sleep ~1 msec */
+	if (pipe_suspended)
+		usleep_range(1000, 2000);
+
+	/* perform the TAG process before gating IPA clocks */
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+	return 0;
+}
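+
+/*
+ * Note: the sync variant above may sleep (usleep_range() after suspending
+ * pipes, and a potentially blocking GSI channel stop), so it must not be
+ * called from atomic context. ipa3_suspend_resource_no_block() below is
+ * the non-sleeping counterpart and instead fails with -EPERM for clients
+ * whose channel would have to be stopped.
+ */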
+
+/**
+ * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int res;
+	struct ipa3_client_names clients;
+	int index;
+	enum ipa_client_type client;
+	struct ipa_ep_cfg_ctrl suspend;
+	int ipa_ep_idx;
+	struct ipa_active_client_logging_info log_info;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR(
+			"ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
+			resource);
+		goto bail;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			res = -EPERM;
+			goto bail;
+		}
+	}
+
+	if (res == 0) {
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(resource));
+		/* perform the TAG process before gating IPA clocks */
+		ipa3_ctx->tag_process_before_gating = true;
+		ipa3_dec_client_disable_clks_no_block(&log_info);
+	}
+bail:
+	return res;
+}
+
+/**
+ * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_resume_resource(enum ipa_rm_resource_name resource)
+{
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		/*
+		 * The related EP will be resumed on connect
+		 * once its resource is granted.
+		 */
+		ipa3_ctx->resume_on_connect[client] = true;
+		IPADBG("%d will be resumed on connect.\n", client);
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = false;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				res = gsi_start_channel(
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
+	}
+
+	return res;
+}
+
+/**
+ * ipa3_get_hw_type_index() - Get the HW type index used as the entry index
+ *	for EP/resource-group related arrays.
+ *
+ * Return value: HW type index
+ */
+static u8 ipa3_get_hw_type_index(void)
+{
+	u8 hw_type_index;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		hw_type_index = IPA_3_0;
+		break;
+	case IPA_HW_v3_5:
+		hw_type_index = IPA_3_5;
+		/*
+		 * This flag is initialized only after the FW load trigger
+		 * from user space (ipa3_write).
+		 */
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_3_5_MHI;
+		break;
+	case IPA_HW_v3_5_1:
+		hw_type_index = IPA_3_5_1;
+		break;
+	case IPA_HW_v4_0:
+		hw_type_index = IPA_4_0;
+		/*
+		 * This flag is initialized only after the FW load trigger
+		 * from user space (ipa3_write).
+		 */
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_4_0_MHI;
+		break;
+	case IPA_HW_v4_1:
+		hw_type_index = IPA_4_1;
+		break;
+	case IPA_HW_v4_2:
+		hw_type_index = IPA_4_2;
+		break;
+	case IPA_HW_v4_5:
+		hw_type_index = IPA_4_5;
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_4_5_MHI;
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
+			hw_type_index = IPA_4_5_APQ;
+		break;
+	case IPA_HW_v4_7:
+		hw_type_index = IPA_4_7;
+		break;
+	case IPA_HW_v4_9:
+		hw_type_index = IPA_4_9;
+		break;
+	default:
+		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
+		hw_type_index = IPA_3_0;
+		break;
+	}
+
+	return hw_type_index;
+}
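+
+/*
+ * The returned index selects the row in the per-HW-version tables in this
+ * file, e.g. (as done in ipa3_cfg_qsb() below):
+ *
+ *	u8 idx = ipa3_get_hw_type_index();
+ *	qmb_ot = &ipa3_qmb_outstanding[idx][IPA_QMB_INSTANCE_DDR];
+ */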
+
+/**
+ * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
+ *
+ * Returns:	None
+ */
+void _ipa_sram_settings_read_v3_0(void)
+{
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+	ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
+	ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
+
+	/* reg fields are in 8B units */
+	ipa3_ctx->smem_restricted_bytes *= 8;
+	ipa3_ctx->smem_sz *= 8;
+	ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa3_ctx->hdr_tbl_lcl = false;
+	ipa3_ctx->hdr_proc_ctx_tbl_lcl = true;
+
+	/*
+	 * When the proc ctx table is located in internal memory,
+	 * modem entries reside first.
+	 */
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
+			IPA_MEM_PART(modem_hdr_proc_ctx_size);
+	}
+	ipa3_ctx->ip4_rt_tbl_hash_lcl = false;
+	ipa3_ctx->ip4_rt_tbl_nhash_lcl = false;
+	ipa3_ctx->ip6_rt_tbl_hash_lcl = false;
+	ipa3_ctx->ip6_rt_tbl_nhash_lcl = false;
+	ipa3_ctx->ip4_flt_tbl_hash_lcl = false;
+	ipa3_ctx->ip4_flt_tbl_nhash_lcl = false;
+	ipa3_ctx->ip6_flt_tbl_hash_lcl = false;
+	ipa3_ctx->ip6_flt_tbl_nhash_lcl = false;
+}
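+
+/*
+ * Worked example for the 8-byte scaling above: if SHARED_MEM_BADDR reads
+ * back as 0x50 (a hypothetical value), the restricted region is
+ * 0x50 * 8 = 0x280 bytes, which lines up with the ofst_start = 0x280 used
+ * by the memory partitions above.
+ */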
+
+/**
+ * ipa3_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: route disable is not supported
+ */
+int ipa3_cfg_route(struct ipahal_reg_route *route)
+{
+	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+		route->route_dis,
+		route->route_def_pipe,
+		route->route_def_hdr_table);
+	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+		route->route_def_hdr_ofst,
+		route->route_frag_def_pipe);
+
+	IPADBG("default_retain_hdr=%d\n",
+		route->route_def_retain_hdr);
+
+	if (route->route_dis) {
+		IPAERR("Route disable is not supported!\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_write_reg_fields(IPA_ROUTE, route);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * -EPERM: always; filter disable is not supported
+ */
+int ipa3_cfg_filter(u32 disable)
+{
+	IPAERR_RL("Filter disable is not supported!\n");
+	return -EPERM;
+}
+
+/**
+ * ipa_disable_hashing_rt_flt_v4_2() - Disable filter and route hashing.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+static int ipa_disable_hashing_rt_flt_v4_2(void)
+{
+	IPADBG("Disable hashing for filter and route table in IPA 4.2 HW\n");
+	ipahal_write_reg(IPA_FILT_ROUT_HASH_EN,
+					IPA_FILT_ROUT_HASH_REG_VAL_v4_2);
+	return 0;
+}
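+
+/*
+ * This is consistent with ipa_4_2_mem_part above, where every hashed
+ * filter/route table size is 0x0: IPA 4.2 HW does not support hashed
+ * tables, so hashing is disabled outright.
+ */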
+
+/**
+ * ipa_comp_cfg() - Configure QMB/Master port selection
+ *
+ * Returns:	None
+ */
+static void ipa_comp_cfg(void)
+{
+	struct ipahal_reg_comp_cfg comp_cfg;
+
+	/* IPAv4 specific, on non-MHI config */
+	if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_0) &&
+		(!ipa3_ctx->ipa_config_is_mhi)) {
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("Before comp config\n");
+		IPADBG("ipa_qmb_select_by_address_global_en = %d\n",
+			comp_cfg.ipa_qmb_select_by_address_global_en);
+
+		IPADBG("ipa_qmb_select_by_address_prod_en = %d\n",
+				comp_cfg.ipa_qmb_select_by_address_prod_en);
+
+		IPADBG("ipa_qmb_select_by_address_cons_en = %d\n",
+				comp_cfg.ipa_qmb_select_by_address_cons_en);
+
+		comp_cfg.ipa_qmb_select_by_address_global_en = false;
+		comp_cfg.ipa_qmb_select_by_address_prod_en = false;
+		comp_cfg.ipa_qmb_select_by_address_cons_en = false;
+
+		ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg);
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("After comp config\n");
+		IPADBG("ipa_qmb_select_by_address_global_en = %d\n",
+			comp_cfg.ipa_qmb_select_by_address_global_en);
+
+		IPADBG("ipa_qmb_select_by_address_prod_en = %d\n",
+				comp_cfg.ipa_qmb_select_by_address_prod_en);
+
+		IPADBG("ipa_qmb_select_by_address_cons_en = %d\n",
+				comp_cfg.ipa_qmb_select_by_address_cons_en);
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("Before comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+
+		comp_cfg.gsi_multi_inorder_rd_dis = true;
+		comp_cfg.gsi_multi_inorder_wr_dis = true;
+
+		ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg);
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("After comp config\n");
+		IPADBG("gsi_multi_inorder_rd_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_rd_dis);
+
+		IPADBG("gsi_multi_inorder_wr_dis = %d\n",
+			comp_cfg.gsi_multi_inorder_wr_dis);
+	}
+
+	/* set GSI_MULTI_AXI_MASTERS_DIS = true for HW v4.1 and v4.2 */
+	if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_1) ||
+		(ipa3_ctx->ipa_hw_type == IPA_HW_v4_2)) {
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("Before comp config\n");
+		IPADBG("gsi_multi_axi_masters_dis = %d\n",
+			comp_cfg.gsi_multi_axi_masters_dis);
+
+		comp_cfg.gsi_multi_axi_masters_dis = true;
+
+		ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg);
+
+		ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg);
+		IPADBG("After comp config\n");
+		IPADBG("gsi_multi_axi_masters_dis = %d\n",
+			comp_cfg.gsi_multi_axi_masters_dis);
+	}
+}
+
+/**
+ * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
+ *
+ * Returns:	None
+ */
+static void ipa3_cfg_qsb(void)
+{
+	u8 hw_type_idx;
+	const struct ipa_qmb_outstanding *qmb_ot;
+	struct ipahal_reg_qsb_max_reads max_reads = { 0 };
+	struct ipahal_reg_qsb_max_writes max_writes = { 0 };
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	/*
+	 * Read the register values before writing to them to ensure
+	 * other values are not overwritten
+	 */
+	ipahal_read_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
+	ipahal_read_reg_fields(IPA_QSB_MAX_READS, &max_reads);
+
+	qmb_ot = &(ipa3_qmb_outstanding[hw_type_idx][IPA_QMB_INSTANCE_DDR]);
+	max_reads.qmb_0_max_reads = qmb_ot->ot_reads;
+	max_writes.qmb_0_max_writes = qmb_ot->ot_writes;
+	max_reads.qmb_0_max_read_beats = qmb_ot->ot_read_beats;
+
+	qmb_ot = &(ipa3_qmb_outstanding[hw_type_idx][IPA_QMB_INSTANCE_PCIE]);
+	max_reads.qmb_1_max_reads = qmb_ot->ot_reads;
+	max_writes.qmb_1_max_writes = qmb_ot->ot_writes;
+
+	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
+	ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
+}
+
+/* relevant starting from IPA v4.5 */
+static void ipa_cfg_qtime(void)
+{
+	struct ipahal_reg_qtime_timestamp_cfg ts_cfg;
+	struct ipahal_reg_timers_pulse_gran_cfg gran_cfg;
+	struct ipahal_reg_timers_xo_clk_div_cfg div_cfg;
+	u32 val;
+
+	/* Configure timestamp resolution */
+	memset(&ts_cfg, 0, sizeof(ts_cfg));
+	ts_cfg.dpl_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+	ts_cfg.dpl_timestamp_sel = true;
+	ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+	ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT;
+	val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
+	IPADBG("qtime timestamp before cfg: 0x%x\n", val);
+	ipahal_write_reg_fields(IPA_QTIME_TIMESTAMP_CFG, &ts_cfg);
+	val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
+	IPADBG("qtime timestamp after cfg: 0x%x\n", val);
+
+	/* Configure timers pulse generators granularity */
+	memset(&gran_cfg, 0, sizeof(gran_cfg));
+	gran_cfg.gran_0 = IPA_TIMERS_TIME_GRAN_100_USEC;
+	gran_cfg.gran_1 = IPA_TIMERS_TIME_GRAN_1_MSEC;
+	gran_cfg.gran_2 = IPA_TIMERS_TIME_GRAN_1_MSEC;
+	val = ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG);
+	IPADBG("timer pulse granularity before cfg: 0x%x\n", val);
+	ipahal_write_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg);
+	val = ipahal_read_reg(IPA_TIMERS_PULSE_GRAN_CFG);
+	IPADBG("timer pulse granularity after cfg: 0x%x\n", val);
+
+	/* Configure timers XO Clock divider */
+	memset(&div_cfg, 0, sizeof(div_cfg));
+	ipahal_read_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg);
+	IPADBG("timer XO clk divider before cfg: enabled=%d divider=%u\n",
+		div_cfg.enable, div_cfg.value);
+
+	/* Make sure divider is disabled */
+	if (div_cfg.enable) {
+		div_cfg.enable = false;
+		ipahal_write_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg);
+	}
+
+	/* On emulation systems the XO clock is lower than on a real
+	 * target (e.g. 96KHz compared to 19.2MHz).
+	 * Use the lowest possible divider.
+	 */
+	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+		ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		div_cfg.value = 0;
+	}
+
+	div_cfg.enable = true; /* Enable the divider */
+	ipahal_write_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg);
+	ipahal_read_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg);
+	IPADBG("timer XO clk divider after cfg: enabled=%d divider=%u\n",
+		div_cfg.enable, div_cfg.value);
+}
+
+/**
+ * ipa3_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ * -EFAULT: IPA registers are not accessible
+ */
+int ipa3_init_hw(void)
+{
+	u32 ipa_version = 0;
+	struct ipahal_reg_counter_cfg cnt_cfg;
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipahal_read_reg(IPA_VERSION);
+	IPADBG("IPA_VERSION=%u\n", ipa_version);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_0);
+		break;
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+		ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_5);
+		break;
+	case IPA_HW_v4_0:
+	case IPA_HW_v4_1:
+		ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_0);
+		break;
+	case IPA_HW_v4_2:
+		ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_2);
+		break;
+	default:
+		IPADBG("Do not update BCR - hw_type=%d\n",
+			ipa3_ctx->ipa_hw_type);
+		break;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+		ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		struct ipahal_reg_clkon_cfg clkon_cfg;
+		struct ipahal_reg_tx_cfg tx_cfg;
+
+		memset(&clkon_cfg, 0, sizeof(clkon_cfg));
+
+		/* enable open global clocks */
+		clkon_cfg.open_global_2x_clk = true;
+		clkon_cfg.open_global = true;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &clkon_cfg);
+
+		ipahal_read_reg_fields(IPA_TX_CFG, &tx_cfg);
+		/* disable PA_MASK_EN to allow holb drop */
+		tx_cfg.pa_mask_en = 0;
+		ipahal_write_reg_fields(IPA_TX_CFG, &tx_cfg);
+	}
+
+	ipa3_cfg_qsb();
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		/* set aggregation granularity to 0.5 msec */
+		cnt_cfg.aggr_granularity = GRAN_VALUE_500_USEC;
+		ipahal_write_reg_fields(IPA_COUNTER_CFG, &cnt_cfg);
+	} else {
+		ipa_cfg_qtime();
+	}
+
+	ipa_comp_cfg();
+
+	/*
+	 * Filter and routing hashing are not supported in IPA 4.2;
+	 * disable them via the hash enable register.
+	 */
+	if (ipa3_ctx->ipa_fltrt_not_hashable)
+		ipa_disable_hashing_rt_flt_v4_2();
+
+	return 0;
+}
+
+/**
+ * ipa3_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa3_get_ep_mapping(enum ipa_client_type client)
+{
+	int ipa_ep_idx;
+	u8 hw_idx = ipa3_get_hw_type_index();
+
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR_RL("Bad client number! client =%d\n", client);
+		return IPA_EP_NOT_ALLOCATED;
+	}
+
+	if (!ipa3_ep_mapping[hw_idx][client].valid)
+		return IPA_EP_NOT_ALLOCATED;
+
+	ipa_ep_idx =
+		ipa3_ep_mapping[hw_idx][client].ipa_gsi_ep_info.ipa_ep_num;
+	if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
+		&& client != IPA_CLIENT_DUMMY_CONS))
+		return IPA_EP_NOT_ALLOCATED;
+
+	return ipa_ep_idx;
+}
+
+/**
+ * ipa3_get_gsi_ep_info() - provide gsi ep information
+ * @client: IPA client value
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
+	(enum ipa_client_type client)
+{
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(client);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED)
+		return NULL;
+
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return NULL;
+
+	return &(ipa3_ep_mapping[ipa3_get_hw_type_index()]
+		[client].ipa_gsi_ep_info);
+}
+
+/**
+ * ipa_get_ep_group() - provide endpoint group by client
+ * @client: client type
+ *
+ * Return value: endpoint group
+ */
+int ipa_get_ep_group(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return -EINVAL;
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
+}
+
+/**
+ * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
+ * @client: client type
+ *
+ * Return value: QMB master index
+ */
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return -EINVAL;
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()]
+		[client].qmb_master_sel;
+}
+
+/**
+ * ipa3_set_client() - set the client mapping for a pipe
+ * @index: pipe index
+ * @client: client type
+ * @uplink: whether the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+		IPAERR("Bad client number! client =%d\n", client);
+	} else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
+		IPAERR("Bad pipe index! index =%d\n", index);
+	} else {
+		ipa3_ctx->ipacm_client[index].client_enum = client;
+		ipa3_ctx->ipacm_client[index].uplink = uplink;
+	}
+}
+
+/**
+ * ipa3_get_wlan_stats() - get IPA Wi-Fi stats
+ * @wdi_sap_stats: stats structure filled by the WDI stats callback
+ *
+ * Return value: success or failure
+ */
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+			wdi_sap_stats);
+	} else {
+		IPAERR_RL("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * ipa3_set_wlan_quota() - set ipa wifi quota
+ * @wdi_quota: quota requirement
+ *
+ * Return value: success or failure
+ */
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+			wdi_quota);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * ipa3_inform_wlan_bw() - inform WLAN of the bandwidth index
+ * @wdi_bw: bandwidth information passed to the WDI stats callback
+ *
+ * Return value: success or failure
+ */
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_INFORM_WLAN_BW,
+			wdi_bw);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * ipa3_get_client() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping enum
+ */
+enum ipacm_client_enum ipa3_get_client(int pipe_idx)
+{
+	if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
+		IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+		return IPACM_CLIENT_MAX;
+	} else {
+		return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
+	}
+}
+
+/**
+ * ipa3_get_client_uplink() - report whether a pipe carries uplink traffic
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: true for uplink, false otherwise
+ */
+bool ipa3_get_client_uplink(int pipe_idx)
+{
+	if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("invalid pipe idx %d\n", pipe_idx);
+		return false;
+	}
+
+	return ipa3_ctx->ipacm_client[pipe_idx].uplink;
+}
+
+/**
+ * ipa3_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return ipa3_ctx->ep[pipe_idx].client;
+}
+
+/**
+ * ipa3_get_client_by_pipe() - return client type relative to pipe
+ * index
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client type
+ */
+enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
+{
+	int j = 0;
+
+	for (j = 0; j < IPA_CLIENT_MAX; j++) {
+		const struct ipa_ep_configuration *iec_ptr =
+			&(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]);
+		if (iec_ptr->valid &&
+		    iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx)
+			break;
+	}
+
+	if (j == IPA_CLIENT_MAX)
+		IPADBG("Got to IPA_CLIENT_MAX (%d) while searching for (%d)\n",
+		       j, pipe_idx);
+
+	return j;
+}
+
+/**
+ * ipa_init_ep_flt_bitmap() - Initialize the bitmap
+ * that represents the End-points that supports filtering
+ */
+void ipa_init_ep_flt_bitmap(void)
+{
+	enum ipa_client_type cl;
+	u8 hw_idx = ipa3_get_hw_type_index();
+	u32 bitmap;
+	u32 pipe_num;
+	const struct ipa_gsi_ep_config *gsi_ep_ptr;
+
+	bitmap = 0;
+	if (ipa3_ctx->ep_flt_bitmap) {
+		WARN_ON(1);
+		return;
+	}
+
+	for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
+		if (ipa3_ep_mapping[hw_idx][cl].support_flt) {
+			gsi_ep_ptr =
+				&ipa3_ep_mapping[hw_idx][cl].ipa_gsi_ep_info;
+			pipe_num =
+				gsi_ep_ptr->ipa_ep_num;
+			bitmap |= (1U << pipe_num);
+			if (bitmap != ipa3_ctx->ep_flt_bitmap) {
+				ipa3_ctx->ep_flt_bitmap = bitmap;
+				ipa3_ctx->ep_flt_num++;
+			}
+		}
+	}
+}
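+
+/*
+ * Illustrative example (hypothetical pipe numbers): if only pipes 3
+ * and 5 have support_flt set, the loop above yields
+ * ep_flt_bitmap = (1U << 3) | (1U << 5) = 0x28 and ep_flt_num = 2.
+ */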
+
+/**
+ * ipa_is_ep_support_flt() - Given an End-point check
+ * whether it supports filtering or not.
+ *
+ * @pipe_idx: IPA end-point number
+ *
+ * Return values:
+ * true if supports and false if not
+ */
+bool ipa_is_ep_support_flt(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
+}
+
+/**
+ * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
+{
+	int type;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * Skip configuring the sequencer type for test clients;
+	 * it is configured dynamically in ipa3_cfg_ep_mode().
+	 */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPADBG("Skip sequencers configuration for test clients\n");
+		return 0;
+	}
+
+	if (seq_cfg->set_dynamic)
+		type = seq_cfg->seq_type;
+	else
+		type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
+			[ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
+
+	if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
+		if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
+			!IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
+			IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+		/* Configure sequencer type */
+		IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	} else {
+		IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, IPv6CT, header, mode, aggregation and route settings and
+ * is a one shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+	if (result)
+		return result;
+
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			result = ipa3_cfg_ep_conn_track(clnt_hdl,
+				&ipa_ep_cfg->conn_track);
+			if (result)
+				return result;
+		}
+
+		result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+		if (result)
+			return result;
+	} else {
+		result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
+				&ipa_ep_cfg->metadata_mask);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+
+static const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+	switch (nat_en) {
+	case (IPA_BYPASS_NAT):
+		return "NAT disabled";
+	case (IPA_SRC_NAT):
+		return "Source NAT";
+	case (IPA_DST_NAT):
+		return "Dst NAT";
+	}
+
+	return "undefined";
+}
+
+static const char *ipa3_get_ipv6ct_en_str(enum ipa_ipv6ct_en_type ipv6ct_en)
+{
+	switch (ipv6ct_en) {
+	case (IPA_BYPASS_IPV6CT):
+		return "ipv6ct disabled";
+	case (IPA_ENABLE_IPV6CT):
+		return "ipv6ct enabled";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_nat:	[in] IPA NAT end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, nat_en=%d(%s)\n",
+			clnt_hdl,
+			ep_nat->nat_en,
+			ipa3_get_nat_en_str(ep_nat->nat_en));
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_conn_track() - IPA end-point IPv6CT configuration
+ * @clnt_hdl:		[in] opaque client handle assigned by IPA to client
+ * @ep_conn_track:	[in] IPA IPv6CT end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_conn_track(u32 clnt_hdl,
+	const struct ipa_ep_cfg_conn_track *ep_conn_track)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_conn_track == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl,
+			ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("IPv6CT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, conn_track_en=%d(%s)\n",
+		clnt_hdl,
+		ep_conn_track->conn_track_en,
+		ipa3_get_ipv6ct_en_str(ep_conn_track->conn_track_en));
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.conn_track = *ep_conn_track;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CONN_TRACK_n, clnt_hdl,
+		ep_conn_track);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+	const struct ipahal_reg_ep_cfg_status *ep_status)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
+			clnt_hdl,
+			ep_status->status_en,
+			ep_status->status_ep,
+			ep_status->status_location);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].status = *ep_status;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	u8 qmb_master_sel;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+	/* Override QMB master selection */
+	qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
+	IPADBG(
+	       "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
+			clnt_hdl,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
+				  &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask
+		*metadata_mask)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, metadata_mask=0x%x\n",
+			clnt_hdl,
+			metadata_mask->metadata_mask);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		clnt_hdl, metadata_mask);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+	IPADBG("pipe=%d metadata_reg_valid=%d\n",
+		clnt_hdl,
+		ep_hdr->hdr_metadata_reg_valid);
+
+	IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+		ep_hdr->hdr_remove_additional,
+		ep_hdr->hdr_a5_mux,
+		ep_hdr->hdr_ofst_pkt_size);
+
+	IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+		ep_hdr->hdr_ofst_pkt_size_valid,
+		ep_hdr->hdr_additional_const_len);
+
+	IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
+		ep_hdr->hdr_ofst_metadata,
+		ep_hdr->hdr_ofst_metadata_valid,
+		ep_hdr->hdr_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ep_hdr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+		clnt_hdl,
+		ep_hdr_ext->hdr_pad_to_alignment);
+
+	IPADBG("hdr_total_len_or_pad_offset=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+	IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+		ep_hdr_ext->hdr_payload_len_inc_padding,
+		ep_hdr_ext->hdr_total_len_or_pad);
+
+	IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_valid,
+		ep_hdr_ext->hdr_little_endian);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr_ext = *ep_hdr_ext;
+	ep->cfg.hdr_ext.hdr = &ep->cfg.hdr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
+		&ep->cfg.hdr_ext);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) {
+		IPAERR("pipe suspend is not supported\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	if (ipa3_ctx->ipa_endp_delay_wa) {
+		IPAERR("pipe setting delay is not supported\n");
+		return 0;
+	}
+
+	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+		clnt_hdl,
+		ep_ctrl->ipa_ep_suspend,
+		ep_ctrl->ipa_ep_delay);
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
+
+	if (ep_ctrl->ipa_ep_suspend == true &&
+			IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
+		ipa3_suspend_active_aggr_wa(clnt_hdl);
+
+	return 0;
+}
+
+const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
+{
+	switch (mode) {
+	case (IPA_BASIC):
+		return "Basic";
+	case (IPA_ENABLE_FRAMING_HDLC):
+		return "HDLC framing";
+	case (IPA_ENABLE_DEFRAMING_HDLC):
+		return "HDLC de-framing";
+	case (IPA_DMA):
+		return "DMA";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ep;
+	int type;
+	struct ipahal_reg_endp_init_mode init_mode;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+		IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%pK\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
+				ep_mode);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = ipa3_get_ep_mapping(ep_mode->dst);
+	if (ep == -1 && ep_mode->mode == IPA_DMA) {
+		IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
+		return -EINVAL;
+	}
+
+	WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
+	if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+		ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
+			clnt_hdl,
+			ep_mode->mode,
+			ipa3_get_mode_type_str(ep_mode->mode),
+			ep_mode->dst);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+	ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
+	init_mode.ep_mode = *ep_mode;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
+
+	/* Configure sequencer type for test clients */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		if (ep_mode->mode == IPA_DMA)
+			type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
+		else
+			/* In IPA 4.2 only single-pass processing is supported */
+			if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2)
+				type =
+				IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP;
+			else
+				type =
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP;
+
+		IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+	switch (aggr_en) {
+	case (IPA_BYPASS_AGGR):
+			return "no aggregation";
+	case (IPA_ENABLE_AGGR):
+			return "aggregation enabled";
+	case (IPA_ENABLE_DEAGGR):
+		return "de-aggregation enabled";
+	}
+
+	return "undefined";
+}
+
+const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+	switch (aggr_type) {
+	case (IPA_MBIM_16):
+			return "MBIM_16";
+	case (IPA_HDLC):
+		return "HDLC";
+	case (IPA_TLP):
+			return "TLP";
+	case (IPA_RNDIS):
+			return "RNDIS";
+	case (IPA_GENERIC):
+			return "GENERIC";
+	case (IPA_QCMAP):
+			return "QCMAP";
+	case (IPA_COALESCE):
+			return "COALESCE";
+	}
+	return "undefined";
+}
+
+static u32 ipa3_time_gran_usec_step(enum ipa_timers_time_gran_type gran)
+{
+	switch (gran) {
+	case IPA_TIMERS_TIME_GRAN_10_USEC:		return 10;
+	case IPA_TIMERS_TIME_GRAN_20_USEC:		return 20;
+	case IPA_TIMERS_TIME_GRAN_50_USEC:		return 50;
+	case IPA_TIMERS_TIME_GRAN_100_USEC:		return 100;
+	case IPA_TIMERS_TIME_GRAN_1_MSEC:		return 1000;
+	case IPA_TIMERS_TIME_GRAN_10_MSEC:		return 10000;
+	case IPA_TIMERS_TIME_GRAN_100_MSEC:		return 100000;
+	case IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC:	return 655350;
+	default:
+		IPAERR("Invalid granularity time unit %d\n", gran);
+		ipa_assert();
+		break;
+	}
+
+	return 100;
+}
+
+/*
+ * ipa3_process_timer_cfg() - Check and produce timer config
+ *
+ * Relevant for IPA 4.5 and above
+ *
+ * Assumes clocks are voted
+ */
+static int ipa3_process_timer_cfg(u32 time_us,
+	u8 *pulse_gen, u8 *time_units)
+{
+	struct ipahal_reg_timers_pulse_gran_cfg gran_cfg;
+	u32 gran0_step, gran1_step;
+
+	IPADBG("time in usec=%u\n", time_us);
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("Invalid IPA version %d\n", ipa3_ctx->ipa_hw_type);
+		return -EPERM;
+	}
+
+	if (!time_us) {
+		*pulse_gen = 0;
+		*time_units = 0;
+		return 0;
+	}
+
+	ipahal_read_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg);
+
+	gran0_step = ipa3_time_gran_usec_step(gran_cfg.gran_0);
+	gran1_step = ipa3_time_gran_usec_step(gran_cfg.gran_1);
+	/* gran_2 is not used by AP */
+
+	IPADBG("gran0 usec step=%u  gran1 usec step=%u\n",
+		gran0_step, gran1_step);
+
+	/* Let's try pulse generator #0 granularity */
+	if (!(time_us % gran0_step)) {
+		if ((time_us / gran0_step) <= IPA_TIMER_SCALED_TIME_LIMIT) {
+			*pulse_gen = 0;
+			*time_units = time_us / gran0_step;
+			IPADBG("Matched: generator=0, units=%u\n",
+				*time_units);
+			return 0;
+		}
+		IPADBG("gran0 cannot be used due to range limit\n");
+	}
+
+	/* Let's try pulse generator #1 granularity */
+	if (!(time_us % gran1_step)) {
+		if ((time_us / gran1_step) <= IPA_TIMER_SCALED_TIME_LIMIT) {
+			*pulse_gen = 1;
+			*time_units = time_us / gran1_step;
+			IPADBG("Matched: generator=1, units=%u\n",
+				*time_units);
+			return 0;
+		}
+		IPADBG("gran1 cannot be used due to range limit\n");
+	}
+
+	IPAERR("Cannot match requested time to configured granularities\n");
+	return -EPERM;
+}
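+
+/*
+ * Worked example (hypothetical values; assumes gran_0 = 100 usec and
+ * gran_1 = 1 msec as programmed by ipa_cfg_qtime(), and an
+ * IPA_TIMER_SCALED_TIME_LIMIT of 255):
+ * - time_us = 2500: divisible by 100 and 2500 / 100 = 25 is in range,
+ *   so pulse generator 0 with 25 time units is chosen.
+ * - time_us = 100000: 100000 / 100 = 1000 exceeds the range limit, so
+ *   pulse generator 1 is tried and matches with 100 time units.
+ */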
+
+/**
+ * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	int res = 0;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
+	    !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
+		IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+			clnt_hdl,
+			ep_aggr->aggr_en,
+			ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
+			ep_aggr->aggr,
+			ipa3_get_aggr_type_str(ep_aggr->aggr),
+			ep_aggr->aggr_byte_limit,
+			ep_aggr->aggr_time_limit);
+	IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
+		ep_aggr->aggr_hard_byte_limit_en,
+		ep_aggr->aggr_sw_eof_active);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		res = ipa3_process_timer_cfg(ep_aggr->aggr_time_limit,
+			&ipa3_ctx->ep[clnt_hdl].cfg.aggr.pulse_generator,
+			&ipa3_ctx->ep[clnt_hdl].cfg.aggr.scaled_time);
+		if (res) {
+			IPAERR("failed to process AGGR timer tmr=%u\n",
+				ep_aggr->aggr_time_limit);
+			ipa_assert();
+			res = -EINVAL;
+			goto complete;
+		}
+		/*
+		 * HW bug on IPA v4.5: the granularity is taken from pipe 0
+		 * instead of the coalescing pipe. This check makes sure the
+		 * RSC pipe ends up using gran 0 for the requested time;
+		 * pipe 0 always uses gran 0, since that is its POR value
+		 * and s/w never changes it.
+		 */
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 &&
+		    ipa3_get_client_mapping(clnt_hdl) ==
+		    IPA_CLIENT_APPS_WAN_COAL_CONS &&
+		    ipa3_ctx->ep[clnt_hdl].cfg.aggr.pulse_generator != 0) {
+			IPAERR("coal pipe using GRAN_SEL = %d\n",
+			       ipa3_ctx->ep[clnt_hdl].cfg.aggr.pulse_generator);
+			ipa_assert();
+		}
+	} else {
+		/*
+		 * The global aggregation granularity is 0.5 msec, so a
+		 * value programmed as 1 msec is de facto 0.5 msec.
+		 * Hence the finest usable granularity is 0.5 msec.
+		 */
+		if (ep_aggr->aggr_time_limit % 500) {
+			IPAERR("given time limit %u is not in 0.5msec\n",
+				ep_aggr->aggr_time_limit);
+			WARN_ON(1);
+			res = -EINVAL;
+			goto complete;
+		}
+
+		/* Compensate for the 0.5 msec global granularity above */
+		ipa3_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit *= 2;
+	}
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+			&ipa3_ctx->ep[clnt_hdl].cfg.aggr);
+complete:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return res;
+}
+
+/**
+ * ipa3_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	struct ipahal_reg_endp_init_route init_rt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n",
+				clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+				clnt_hdl);
+		return 0;
+	}
+
+	if (ep_route->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+			clnt_hdl,
+			ep_route->rt_tbl_hdl);
+
+	/* always use "default" routing table when programming EP ROUTE reg */
+	ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
+		IPA_MEM_PART(v4_apps_rt_index_lo);
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+		init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n,
+			clnt_hdl, &init_rt);
+
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	}
+
+	return 0;
+}
+
+#define MAX_ALLOWED_BASE_VAL 0x1f
+#define MAX_ALLOWED_SCALE_VAL 0x1f
+
+/**
+ * ipa3_cal_ep_holb_scale_base_val - calculate base and scale value from tmr_val
+ *
+ * On IPA v4.2 HW the HOLB timer register must be programmed with base
+ * and scale values rather than a raw timer value.
+ * @tmr_val: [in] timer value for the HOLB timer
+ * @ep_holb: [out] IPA end-point configuration filled with the base and
+ *		scale values
+ */
+void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val,
+				struct ipa_ep_cfg_holb *ep_holb)
+{
+	u32 base_val, scale, scale_val = 1, base = 2;
+
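+	/*
+	 * Search for a power-of-two divisor that brings tmr_val within
+	 * the 5-bit base field; scale_val doubles on every iteration
+	 * after the first.
+	 */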
+	for (scale = 0; scale <= MAX_ALLOWED_SCALE_VAL; scale++) {
+		base_val = tmr_val/scale_val;
+		if (scale != 0)
+			scale_val *= base;
+		if (base_val <= MAX_ALLOWED_BASE_VAL)
+			break;
+	}
+	ep_holb->base_val = base_val;
+	ep_holb->scale = scale_val;
+}
+
+/**
+ * ipa3_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+	    ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
+	    ep_holb->en > 1) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
+		ep_holb);
+
+	/* IPA4.5 issue requires HOLB_EN to be written twice */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			clnt_hdl, ep_holb);
+
+	/* Configure timer */
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) {
+		ipa3_cal_ep_holb_scale_base_val(ep_holb->tmr_val,
+				&ipa3_ctx->ep[clnt_hdl].holb);
+		goto success;
+	}
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		int res;
+
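+		/* tmr_val is in msec while ipa3_process_timer_cfg() works
+		 * in usec, hence the *1000 conversion below
+		 */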
+		res = ipa3_process_timer_cfg(ep_holb->tmr_val * 1000,
+			&ipa3_ctx->ep[clnt_hdl].holb.pulse_generator,
+			&ipa3_ctx->ep[clnt_hdl].holb.scaled_time);
+		if (res) {
+			IPAERR("failed to process HOLB timer tmr=%u\n",
+				ep_holb->tmr_val);
+			ipa_assert();
+			return res;
+		}
+	}
+
+success:
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+		clnt_hdl, &ipa3_ctx->ep[clnt_hdl].holb);
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+		ep_holb->tmr_val);
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper for ipa3_cfg_ep_holb() that takes a client type instead of a
+ * client handle. Used for clients that do not have a client handle.
+ *
+ * @client:	[in] client type
+ * @ep_holb:	[in] IPA HOLB end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
+}
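+
+/*
+ * Minimal usage sketch (illustrative; the client and timer value are
+ * hypothetical):
+ *
+ *	struct ipa_ep_cfg_holb holb = { 0 };
+ *
+ *	holb.en = 1;
+ *	holb.tmr_val = 10;
+ *	ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb);
+ */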
+
+/**
+ * ipa3_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+		clnt_hdl,
+		ep_deaggr->deaggr_hdr_len);
+
+	IPADBG("packet_offset_valid=%d\n",
+		ep_deaggr->packet_offset_valid);
+
+	IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+		ep_deaggr->packet_offset_location,
+		ep_deaggr->max_packet_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.deaggr = *ep_deaggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
+		&ep->cfg.deaggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+	u32 qmap_id = 0;
+	struct ipa_ep_cfg_metadata ep_md_reg_wrt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ep_md_reg_wrt = *ep_md;
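+	/* position the mux-id within the metadata register's MUX_ID field */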
+	qmap_id = (ep_md->qmap_id <<
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
+
+	/* mark tethering bit for remote modem */
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_1)
+		qmap_id |= IPA_QMAP_TETH_BIT;
+
+	ep_md_reg_wrt.qmap_id = qmap_id;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
+		&ep_md_reg_wrt);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
+			&ipa3_ctx->ep[clnt_hdl].cfg.hdr);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (param_in->client >= IPA_CLIENT_MAX) {
+		IPAERR_RL("bad parm client:%d\n", param_in->client);
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR_RL("Invalid client.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR_RL("EP not allocated.\n");
+		goto fail;
+	}
+
+	meta.qmap_id = param_in->qmap_id;
+	if (param_in->client == IPA_CLIENT_USB_PROD ||
+	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
+	    param_in->client == IPA_CLIENT_ODU_PROD ||
+	    param_in->client == IPA_CLIENT_ETHERNET_PROD ||
+	    param_in->client == IPA_CLIENT_WIGIG_PROD) {
+		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
+	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
+			   param_in->client == IPA_CLIENT_WLAN2_PROD) {
+		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+		if (param_in->client == IPA_CLIENT_WLAN2_PROD)
+			result = ipa3_write_qmapid_wdi3_gsi_pipe(
+				ipa_ep_idx, meta.qmap_id);
+		else
+			result = ipa3_write_qmapid_wdi_pipe(
+				ipa_ep_idx, meta.qmap_id);
+		if (result)
+			IPAERR_RL("qmap_id %d write failed on ep=%d\n",
+					meta.qmap_id, ipa_ep_idx);
+		result = 0;
+	}
+
+fail:
+	return result;
+}
+
+/**
+ * ipa3_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+
+	IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM,
+ * QCNCM, etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (mode != IPA_MBIM_AGGR) {
+			IPAERR("Only MBIM mode is supported staring 4.0\n");
+			return -EPERM;
+		}
+	} else {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+		qcncm.mode_en = mode;
+		ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_set_qcncm_ndp_sig(char sig[3])
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported staring 4.0\n");
+		return -EPERM;
+	}
+
+	if (sig == NULL) {
+		IPAERR("bad argument\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
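+	/* pack the 3 signature bytes MSB-first; e.g. the expected "QND"
+	 * signature yields mode_val = 0x514E44
+	 */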
+	qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
+	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_single_ndp_per_mbim(bool enable)
+{
+	struct ipahal_reg_single_ndp_mode mode;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported staring 4.0\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	mode.single_ndp_en = enable;
+	ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
+ * boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
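+
+/*
+ * Illustrative examples (hypothetical addresses, boundary = 0x1000):
+ * [0x0FF0, 0x1010] crosses 0x1000 and returns 1, while
+ * [0x0100, 0x01F0] lies within a single aligned region and returns 0.
+ */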
+
+/**
+ * ipa3_init_mem_partition() - Assigns the static memory partition
+ * based on the IPA version
+ *
+ * Returns:	0 on success
+ */
+int ipa3_init_mem_partition(enum ipa_hw_type type)
+{
+	switch (type) {
+	case IPA_HW_v4_1:
+		ipa3_ctx->ctrl->mem_partition = &ipa_4_1_mem_part;
+		break;
+	case IPA_HW_v4_2:
+		ipa3_ctx->ctrl->mem_partition = &ipa_4_2_mem_part;
+		break;
+	case IPA_HW_v4_5:
+		ipa3_ctx->ctrl->mem_partition = &ipa_4_5_mem_part;
+		break;
+	case IPA_HW_v4_7:
+		ipa3_ctx->ctrl->mem_partition = &ipa_4_7_mem_part;
+		break;
+	case IPA_HW_v4_9:
+		ipa3_ctx->ctrl->mem_partition = &ipa_4_9_mem_part;
+		break;
+	case IPA_HW_None:
+	case IPA_HW_v1_0:
+	case IPA_HW_v1_1:
+	case IPA_HW_v2_0:
+	case IPA_HW_v2_1:
+	case IPA_HW_v2_5:
+	case IPA_HW_v2_6L:
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+	case IPA_HW_v4_0:
+		IPAERR("unsupported version %d\n", type);
+		return -EPERM;
+	}
+
+	if (IPA_MEM_PART(uc_info_ofst) & 3) {
+		IPAERR("UC INFO OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(uc_info_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
+
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
+		IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_hash_ofst),
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
+		IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_nhash_ofst),
+		IPA_MEM_PART(v4_flt_nhash_size),
+		IPA_MEM_PART(v4_flt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
+		IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
+		IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_nhash_ofst),
+		IPA_MEM_PART(v6_flt_nhash_size),
+		IPA_MEM_PART(v6_flt_nhash_size_ddr));
+
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
+
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
+		IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
+
+	IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_hash_size),
+		IPA_MEM_PART(v4_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
+		IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_ofst));
+
+	IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_size),
+		IPA_MEM_PART(v4_rt_nhash_size_ddr));
+
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
+
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
+		IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
+
+	IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_hash_size),
+		IPA_MEM_PART(v6_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
+		IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_ofst));
+
+	IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_size),
+		IPA_MEM_PART(v6_rt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
+		IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
+		IPAERR("APPS HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+	if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+	if (IPA_MEM_PART(pdn_config_ofst) & 7) {
+		IPAERR("PDN CONFIG OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(pdn_config_ofst));
+		return -ENODEV;
+	}
+
+	/*
+	 * Routing rules point to hdr_proc_ctx entries as 32-byte offsets
+	 * from a base, where the base is the first address of the modem
+	 * hdr_proc_ctx area. The AP driver installs APPS hdr_proc_ctx
+	 * entries starting at the beginning of the apps hdr_proc_ctx part,
+	 * so the first apps hdr_proc_ctx offset referenced by a routing
+	 * rule is modem_hdr_proc_ctx_size >> 5 (in 32B units).
+	 */
+	if (IPA_MEM_PART(modem_hdr_proc_ctx_size) & 31) {
+		IPAERR("MODEM HDR PROC CTX SIZE 0x%x is not 32B aligned\n",
+			IPA_MEM_PART(modem_hdr_proc_ctx_size));
+		return -ENODEV;
+	}
+
+	/*
+	 * When installing a routing rule, the AP driver calculates the
+	 * hdr_proc_ctx offset as the local offset (from the base of the apps
+	 * part) + modem_hdr_proc_ctx_size, to get the offset from the modem
+	 * part base. Thus the apps part must be adjacent to the modem part.
+	 */
+	if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) !=
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+		IPA_MEM_PART(modem_hdr_proc_ctx_size)) {
+		IPAERR("APPS HDR PROC CTX SIZE not adjacent to MODEM one!\n");
+		return -ENODEV;
+	}
+
+	IPADBG("NAT TBL OFST 0x%x SIZE 0x%x\n",
+		   IPA_MEM_PART(nat_tbl_ofst),
+		   IPA_MEM_PART(nat_tbl_size));
+
+	if (IPA_MEM_PART(nat_tbl_ofst) & 31) {
+		IPAERR("NAT TBL OFST 0x%x is not aligned properly\n",
+			   IPA_MEM_PART(nat_tbl_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(pdn_config_ofst),
+		IPA_MEM_PART(pdn_config_size));
+
+	if (IPA_MEM_PART(pdn_config_ofst) & 7) {
+		IPAERR("PDN CONFIG OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(pdn_config_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("QUOTA STATS OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(stats_quota_ofst),
+		IPA_MEM_PART(stats_quota_size));
+
+	if (IPA_MEM_PART(stats_quota_ofst) & 7) {
+		IPAERR("QUOTA STATS OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(stats_quota_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("TETHERING STATS OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(stats_tethering_ofst),
+		IPA_MEM_PART(stats_tethering_size));
+
+	if (IPA_MEM_PART(stats_tethering_ofst) & 7) {
+		IPAERR("TETHERING STATS OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(stats_tethering_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("FILTER AND ROUTING STATS OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(stats_fnr_ofst),
+		IPA_MEM_PART(stats_fnr_size));
+
+	if (IPA_MEM_PART(stats_fnr_ofst) & 7) {
+		IPAERR("FILTER AND ROUTING STATS OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(stats_fnr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("DROP STATS OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(stats_drop_ofst),
+		IPA_MEM_PART(stats_drop_size));
+
+	if (IPA_MEM_PART(stats_drop_ofst) & 7) {
+		IPAERR("DROP STATS OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(stats_drop_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_hash_ofst),
+		IPA_MEM_PART(apps_v4_flt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_flt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_hash_ofst),
+		IPA_MEM_PART(apps_v6_flt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_flt_nhash_size));
+
+	IPADBG("RAM END OFST 0x%x\n",
+		IPA_MEM_PART(end_ofst));
+
+	IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_hash_ofst),
+		IPA_MEM_PART(apps_v4_rt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_rt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_hash_ofst),
+		IPA_MEM_PART(apps_v6_rt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_rt_nhash_size));
+
+	if (IPA_MEM_PART(modem_ofst) & 7) {
+		IPAERR("MODEM OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	if (IPA_MEM_PART(uc_descriptor_ram_ofst) & 1023) {
+		IPAERR("UC DESCRIPTOR RAM OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(uc_descriptor_ram_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("UC DESCRIPTOR RAM OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(uc_descriptor_ram_ofst),
+		IPA_MEM_PART(uc_descriptor_ram_size));
+
+	return 0;
+}
+
+/**
+ * ipa3_controller_static_bind() - set the appropriate methods for
+ *  IPA Driver based on the HW version
+ *
+ *  @ctrl: data structure which holds the function pointers
+ *  @hw_type: the HW type in use
+ *
+ *  Note: the runtime assignment could be avoided with C99 designated
+ *  struct initializers, trading memory for time.
+ */
+int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
+		enum ipa_hw_type hw_type)
+{
+	if (hw_type >= IPA_HW_v4_0) {
+		ctrl->ipa_clk_rate_turbo = IPA_V4_0_CLK_RATE_TURBO;
+		ctrl->ipa_clk_rate_nominal = IPA_V4_0_CLK_RATE_NOMINAL;
+		ctrl->ipa_clk_rate_svs = IPA_V4_0_CLK_RATE_SVS;
+		ctrl->ipa_clk_rate_svs2 = IPA_V4_0_CLK_RATE_SVS2;
+	} else if (hw_type >= IPA_HW_v3_5) {
+		ctrl->ipa_clk_rate_turbo = IPA_V3_5_CLK_RATE_TURBO;
+		ctrl->ipa_clk_rate_nominal = IPA_V3_5_CLK_RATE_NOMINAL;
+		ctrl->ipa_clk_rate_svs = IPA_V3_5_CLK_RATE_SVS;
+		ctrl->ipa_clk_rate_svs2 = IPA_V3_5_CLK_RATE_SVS2;
+	} else {
+		ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+		ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+		ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+		ctrl->ipa_clk_rate_svs2 = IPA_V3_0_CLK_RATE_SVS2;
+	}
+
+	ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
+	ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
+	ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
+	ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
+	ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
+	ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
+	ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
+	ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
+	ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
+	ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
+	ctrl->clock_scaling_bw_threshold_svs =
+		IPA_V3_0_BW_THRESHOLD_SVS_MBPS;
+	ctrl->clock_scaling_bw_threshold_nominal =
+		IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
+	ctrl->clock_scaling_bw_threshold_turbo =
+		IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
+	ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
+	ctrl->ipa_init_sram = _ipa_init_sram_v3;
+	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
+	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+	ctrl->max_holb_tmr_val = IPA_MAX_HOLB_TMR_VAL;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
+
+	return 0;
+}
+
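+/*
+ * ipa3_skb_recycle() - reset an skb so it can be reused without
+ * reallocation: clear the shared info (keeping a single dataref) and
+ * rewind data/tail to just past the NET_SKB_PAD headroom.
+ */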
+void ipa3_skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+
+int ipa3_alloc_rule_id(struct idr *rule_ids)
+{
+	/* There are two groups of rule IDs, Modem ones and Apps ones.
+	 * They are distinguished by the high bit: Modem IDs have it set.
+	 */
+	return idr_alloc(rule_ids, NULL,
+		ipahal_get_low_rule_id(),
+		ipahal_get_rule_id_hi_bit(),
+		GFP_KERNEL);
+}
+
+static int __ipa3_alloc_counter_hdl
+	(struct ipa_ioc_flt_rt_counter_alloc *counter)
+{
+	int id;
+
+	/* assign a handle using idr to this counter block */
+	id = idr_alloc(&ipa3_ctx->flt_rt_counters.hdl, counter,
+		ipahal_get_low_hdl_id(), ipahal_get_high_hdl_id(),
+		GFP_ATOMIC);
+
+	return id;
+}
+
+int ipa3_alloc_counter_id(struct ipa_ioc_flt_rt_counter_alloc *counter)
+{
+	int i, unused_cnt, unused_max, unused_start_id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+
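+	/*
+	 * The counter ID space is split: HW counters occupy IDs
+	 * [1, IPA_FLT_RT_HW_COUNTER] and SW counters occupy
+	 * [IPA_FLT_RT_HW_COUNTER + 1,
+	 *  IPA_FLT_RT_HW_COUNTER + IPA_FLT_RT_SW_COUNTER].
+	 */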
+	/* allocate hw counters */
+	counter->hw_counter.start_id = 0;
+	counter->hw_counter.end_id = 0;
+	unused_cnt = 0;
+	unused_max = 0;
+	unused_start_id = 0;
+	if (counter->hw_counter.num_counters == 0)
+		goto sw_counter_alloc;
+	/* find the start id which can be used for the block */
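+	/*
+	 * Counter IDs are 1-based while the loop index is 0-based; the
+	 * +1/+2 conversions below account for this.
+	 */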
+	for (i = 0; i < IPA_FLT_RT_HW_COUNTER; i++) {
+		if (!ipa3_ctx->flt_rt_counters.used_hw[i])
+			unused_cnt++;
+		else {
+			/* tracking max unused block in case allow less */
+			if (unused_cnt > unused_max) {
+				unused_start_id = i - unused_cnt + 1;
+				unused_max = unused_cnt;
+			}
+			unused_cnt = 0;
+		}
+		/* find it, break and use this 1st possible block */
+		if (unused_cnt == counter->hw_counter.num_counters) {
+			counter->hw_counter.start_id = i - unused_cnt + 2;
+			counter->hw_counter.end_id = i + 1;
+			break;
+		}
+	}
+	if (counter->hw_counter.start_id == 0) {
+		/* if not able to find such a block but allow less */
+		if (counter->hw_counter.allow_less && unused_max) {
+			/* give the max possible unused blocks */
+			counter->hw_counter.num_counters = unused_max;
+			counter->hw_counter.start_id = unused_start_id;
+			counter->hw_counter.end_id =
+				unused_start_id + unused_max - 1;
+		} else {
+			/* not able to find such a block */
+			counter->hw_counter.num_counters = 0;
+			counter->hw_counter.start_id = 0;
+			counter->hw_counter.end_id = 0;
+			goto err;
+		}
+	}
+
+sw_counter_alloc:
+	/* allocate sw counters */
+	counter->sw_counter.start_id = 0;
+	counter->sw_counter.end_id = 0;
+	unused_cnt = 0;
+	unused_max = 0;
+	unused_start_id = 0;
+	if (counter->sw_counter.num_counters == 0)
+		goto mark_hw_cnt;
+	/* find the start id which can be used for the block */
+	for (i = 0; i < IPA_FLT_RT_SW_COUNTER; i++) {
+		if (!ipa3_ctx->flt_rt_counters.used_sw[i])
+			unused_cnt++;
+		else {
+			/* tracking max unused block in case allow less */
+			if (unused_cnt > unused_max) {
+				unused_start_id = i - unused_cnt +
+					1 + IPA_FLT_RT_HW_COUNTER;
+				unused_max = unused_cnt;
+			}
+			unused_cnt = 0;
+		}
+		/* find it, break and use this 1st possible block */
+		if (unused_cnt == counter->sw_counter.num_counters) {
+			counter->sw_counter.start_id = i - unused_cnt +
+				2 + IPA_FLT_RT_HW_COUNTER;
+			counter->sw_counter.end_id =
+				i + 1 + IPA_FLT_RT_HW_COUNTER;
+			break;
+		}
+	}
+	if (counter->sw_counter.start_id == 0) {
+		/* if not able to find such a block but allow less */
+		if (counter->sw_counter.allow_less && unused_max) {
+			/* give the max possible unused blocks */
+			counter->sw_counter.num_counters = unused_max;
+			counter->sw_counter.start_id = unused_start_id;
+			counter->sw_counter.end_id =
+				unused_start_id + unused_max - 1;
+		} else {
+			/* not able to find such a block */
+			counter->sw_counter.num_counters = 0;
+			counter->sw_counter.start_id = 0;
+			counter->sw_counter.end_id = 0;
+			goto err;
+		}
+	}
+
+mark_hw_cnt:
+	/* add hw counters, set used to 1 */
+	if (counter->hw_counter.num_counters == 0)
+		goto mark_sw_cnt;
+	unused_start_id = counter->hw_counter.start_id;
+	if (unused_start_id < 1 ||
+		unused_start_id > IPA_FLT_RT_HW_COUNTER) {
+		IPAERR("unexpected hw_counter start id %d\n",
+			   unused_start_id);
+		goto err;
+	}
+	for (i = 0; i < counter->hw_counter.num_counters; i++)
+		ipa3_ctx->flt_rt_counters.used_hw[unused_start_id + i - 1]
+			= true;
+mark_sw_cnt:
+	/* add sw counters, set used to 1 */
+	if (counter->sw_counter.num_counters == 0)
+		goto done;
+	unused_start_id = counter->sw_counter.start_id
+		- IPA_FLT_RT_HW_COUNTER;
+	if (unused_start_id < 1 ||
+		unused_start_id > IPA_FLT_RT_SW_COUNTER) {
+		IPAERR("unexpected sw_counter start id %d\n",
+			   unused_start_id);
+		goto err;
+	}
+	for (i = 0; i < counter->sw_counter.num_counters; i++)
+		ipa3_ctx->flt_rt_counters.used_sw[unused_start_id + i - 1]
+			= true;
+done:
+	/* get a handle from idr for dealloc */
+	counter->hdl = __ipa3_alloc_counter_hdl(counter);
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	idr_preload_end();
+	return 0;
+
+err:
+	counter->hdl = -1;
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	idr_preload_end();
+	return -ENOMEM;
+}
+
+void ipa3_counter_remove_hdl(int hdl)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter;
+	int offset = 0;
+
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	counter = idr_find(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+	if (counter == NULL) {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	/* remove counters belong to this hdl, set used back to 0 */
+	offset = counter->hw_counter.start_id - 1;
+	if (offset >= 0 && offset + counter->hw_counter.num_counters
+		<= IPA_FLT_RT_HW_COUNTER) {
+		memset(&ipa3_ctx->flt_rt_counters.used_hw[offset],
+			   0, counter->hw_counter.num_counters * sizeof(bool));
+	} else {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	offset = counter->sw_counter.start_id - 1 - IPA_FLT_RT_HW_COUNTER;
+	if (offset >= 0 && offset + counter->sw_counter.num_counters
+		<= IPA_FLT_RT_SW_COUNTER) {
+		memset(&ipa3_ctx->flt_rt_counters.used_sw[offset],
+		   0, counter->sw_counter.num_counters * sizeof(bool));
+	} else {
+		IPAERR("unexpected hdl %d\n", hdl);
+		goto err;
+	}
+	/* remove the handle */
+	idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+err:
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+}
+
+void ipa3_counter_id_remove_all(void)
+{
+	struct ipa_ioc_flt_rt_counter_alloc *counter;
+	int hdl;
+
+	spin_lock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+	/* remove all counters, set used back to 0 */
+	memset(&ipa3_ctx->flt_rt_counters.used_hw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_hw));
+	memset(&ipa3_ctx->flt_rt_counters.used_sw, 0,
+		   sizeof(ipa3_ctx->flt_rt_counters.used_sw));
+	/* remove all handles */
+	idr_for_each_entry(&ipa3_ctx->flt_rt_counters.hdl, counter, hdl)
+		idr_remove(&ipa3_ctx->flt_rt_counters.hdl, hdl);
+	spin_unlock(&ipa3_ctx->flt_rt_counters.hdl_lock);
+}
+
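+/*
+ * ipa3_id_alloc() - idr_preload() is used so the GFP_NOWAIT allocation
+ * under the spinlock can draw from the preallocated per-CPU pool.
+ */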
+int ipa3_id_alloc(void *ptr)
+{
+	int id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa3_ctx->idr_lock);
+	id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+	spin_unlock(&ipa3_ctx->idr_lock);
+	idr_preload_end();
+
+	return id;
+}
+
+void *ipa3_id_find(u32 id)
+{
+	void *ptr;
+
+	spin_lock(&ipa3_ctx->idr_lock);
+	ptr = idr_find(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+
+	return ptr;
+}
+
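+/*
+ * ipa3_check_idr_if_freed() - return true if @ptr is no longer
+ * registered in the IPA idr (i.e. its ID has been freed)
+ */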
+bool ipa3_check_idr_if_freed(void *ptr)
+{
+	int id;
+	void *iter_ptr;
+
+	spin_lock(&ipa3_ctx->idr_lock);
+	idr_for_each_entry(&ipa3_ctx->ipa_idr, iter_ptr, id) {
+		if ((uintptr_t)ptr == (uintptr_t)iter_ptr) {
+			spin_unlock(&ipa3_ctx->idr_lock);
+			return false;
+		}
+	}
+	spin_unlock(&ipa3_ctx->idr_lock);
+	return true;
+}
+
+void ipa3_id_remove(u32 id)
+{
+	spin_lock(&ipa3_ctx->idr_lock);
+	idr_remove(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+}
+
+void ipa3_tag_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_tag_free_skb(void *user1, int user2)
+{
+	dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
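+/* NOP + IP_PACKET_INIT + IP_PACKET_TAG_STATUS + dummy skb descriptors */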
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+#define MAX_RETRY_ALLOC 10
+#define ALLOC_MIN_SLEEP_RX 100000
+#define ALLOC_MAX_SLEEP_RX 200000
+
+/**
+ * ipa3_tag_process() - Initiates a tag process. Incorporates the input
+ * descriptors
+ *
+ * @desc:	descriptors with commands for IC
+ * @descs_num:	number of descriptors in the above array
+ * @timeout:	time (in jiffies) to wait for the TAG response
+ *
+ * Note: The descriptors are copied (if there's room); the caller must
+ * still free its own descriptors afterwards
+ *
+ * Return: 0 or negative in case of failure
+ */
+int ipa3_tag_process(struct ipa3_desc desc[],
+	int descs_num,
+	unsigned long timeout)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc *tag_desc;
+	int desc_idx = 0;
+	struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_ip_packet_tag_status status;
+	int i;
+	struct sk_buff *dummy_skb;
+	int res = 0;
+	struct ipa3_tag_completion *comp;
+	int ep_idx;
+	u32 retry_cnt = 0;
+	struct ipahal_reg_valmask valmask;
+	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+
+	/* Not enough room for the required descriptors for the tag process */
+	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+		IPAERR("up to %d descriptors are allowed (received %d)\n",
+		       IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+		       descs_num);
+		return -ENOMEM;
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	if (!tag_desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Copy the required descriptors from the client now */
+	if (desc) {
+		memcpy(&(tag_desc[0]), desc, descs_num *
+			sizeof(tag_desc[0]));
+		desc_idx += descs_num;
+	} else {
+		res = -EFAULT;
+		IPAERR("desc is NULL\n");
+		goto fail_free_tag_desc;
+	}
+
+	/* IC to close the coal frame before HPS Clear if coal is enabled */
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		reg_write_coal_close.skip_pipeline_clear = false;
+		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		reg_write_coal_close.offset = ipahal_get_reg_ofst(
+			IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+		reg_write_coal_close.value = valmask.val;
+		reg_write_coal_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_coal_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct coal close IC\n");
+			res = -ENOMEM;
+			goto fail_free_tag_desc;
+		}
+		ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
+		tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+		tag_desc[desc_idx].user1 = cmd_pyld;
+		++desc_idx;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld = ipahal_construct_nop_imm_cmd(
+		false, IPAHAL_FULL_PIPELINE_CLEAR, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	++desc_idx;
+
+	/* IP_PACKET_INIT IC for tag status to be sent to apps */
+	pktinit_cmd.destination_pipe_index =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_init imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	++desc_idx;
+
+	/* status IC */
+	status.tag = IPA_COOKIE;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	++desc_idx;
+
+	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+	if (!comp) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and rx handler */
+	atomic_set(&comp->cnt, 2);
+
+	/* dummy packet to send to IPA. payload is the completion's address */
+	dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+	if (!dummy_skb) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_free_comp;
+	}
+
+	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+	if (desc_idx >= IPA_TAG_MAX_DESC) {
+		IPAERR("number of commands is out of range\n");
+		res = -ENOBUFS;
+		goto fail_free_skb;
+	}
+
+	tag_desc[desc_idx].pyld = dummy_skb->data;
+	tag_desc[desc_idx].len = dummy_skb->len;
+	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+	tag_desc[desc_idx].callback = ipa3_tag_free_skb;
+	tag_desc[desc_idx].user1 = dummy_skb;
+	desc_idx++;
+retry_alloc:
+	/* send all descriptors to IPA with single EOT */
+	res = ipa3_send(sys, desc_idx, tag_desc, true);
+	if (res) {
+		if (res == -ENOMEM) {
+			if (retry_cnt < MAX_RETRY_ALLOC) {
+				IPADBG(
+				"failed to alloc memory retry cnt = %d\n",
+					retry_cnt);
+				retry_cnt++;
+				usleep_range(ALLOC_MIN_SLEEP_RX,
+					ALLOC_MAX_SLEEP_RX);
+				goto retry_alloc;
+			}
+
+		}
+		IPAERR("failed to send TAG packets %d\n", res);
+		res = -ENOMEM;
+		goto fail_free_skb;
+	}
+	kfree(tag_desc);
+	tag_desc = NULL;
+	ipa3_ctx->tag_process_before_gating = false;
+
+	IPADBG("waiting for TAG response\n");
+	res = wait_for_completion_timeout(&comp->comp, timeout);
+	if (res == 0) {
+		IPAERR("timeout (%lu msec) on waiting for TAG response\n",
+			timeout);
+		WARN_ON(1);
+		if (atomic_dec_return(&comp->cnt) == 0)
+			kfree(comp);
+		return -ETIME;
+	}
+
+	IPADBG("TAG response arrived!\n");
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+	/*
+	 * sleep for short period to ensure IPA wrote all packets to
+	 * the transport
+	 */
+	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+	return 0;
+
+fail_free_skb:
+	kfree_skb(dummy_skb);
+fail_free_comp:
+	kfree(comp);
+fail_free_desc:
+	/*
+	 * Free only the descriptors allocated here:
+	 * [nop, pkt_init, status, dummy_skb].
+	 * The caller is responsible for freeing its own allocations
+	 * in case of failure.
+	 * The min is required because we may fail during
+	 * the initial allocations above.
+	 */
+	for (i = descs_num;
+		i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
+		if (tag_desc[i].callback)
+			tag_desc[i].callback(tag_desc[i].user1,
+				tag_desc[i].user2);
+fail_free_tag_desc:
+	kfree(tag_desc);
+	return res;
+}
+
+/**
+ * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
+ *					 immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
+	int desc_size, int start_pipe, int end_pipe)
+{
+	int i;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int desc_idx = 0;
+	int res;
+	struct ipahal_imm_cmd_register_write reg_write_agg_close;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_reg_valmask valmask;
+
+	for (i = start_pipe; i < end_pipe; i++) {
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
+		if (!ep_aggr.aggr_en)
+			continue;
+		IPADBG("Force close ep: %d\n", i);
+		if (desc_idx + 1 > desc_size) {
+			IPAERR("Internal error - no descriptors\n");
+			res = -EFAULT;
+			goto fail_no_desc;
+		}
+
+		reg_write_agg_close.skip_pipeline_clear = false;
+		reg_write_agg_close.pipeline_clear_options =
+			IPAHAL_FULL_PIPELINE_CLEAR;
+		reg_write_agg_close.offset =
+			ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(i, &valmask);
+		reg_write_agg_close.value = valmask.val;
+		reg_write_agg_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_agg_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct register_write imm cmd\n");
+			res = -ENOMEM;
+			goto fail_alloc_reg_write_agg_close;
+		}
+
+		ipa3_init_imm_cmd_desc(&desc[desc_idx], cmd_pyld);
+		desc[desc_idx].callback = ipa3_tag_destroy_imm;
+		desc[desc_idx].user1 = cmd_pyld;
+		++desc_idx;
+	}
+
+	return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+	for (i = 0; i < desc_idx; ++i)
+		if (desc[i].callback)
+			desc[i].callback(desc[i].user1,
+				desc[i].user2);
+fail_no_desc:
+	return res;
+}
+
+/**
+ * ipa3_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ */
+int ipa3_tag_aggr_force_close(int pipe_num)
+{
+	struct ipa3_desc *desc;
+	int res = -1;
+	int start_pipe;
+	int end_pipe;
+	int num_descs;
+	int num_aggr_descs;
+
+	if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid pipe number %d\n", pipe_num);
+		return -EINVAL;
+	}
+
+	if (pipe_num == -1) {
+		start_pipe = 0;
+		end_pipe = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_pipe = pipe_num;
+		end_pipe = pipe_num + 1;
+	}
+
+	num_descs = end_pipe - start_pipe;
+
+	desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	/* Force close aggregation on all valid pipes with aggregation */
+	num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
+						start_pipe, end_pipe);
+	if (num_aggr_descs < 0) {
+		IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
+			num_aggr_descs);
+		goto fail_free_desc;
+	}
+
+	res = ipa3_tag_process(desc, num_aggr_descs,
+			      IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+	kfree(desc);
+
+	return res;
+}
+
+/**
+ * ipa3_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_ready(void)
+{
+	bool complete;
+
+	if (ipa3_ctx == NULL)
+		return false;
+	mutex_lock(&ipa3_ctx->lock);
+	complete = ipa3_ctx->ipa_initialization_complete;
+	mutex_unlock(&ipa3_ctx->lock);
+	return complete;
+}
+
+/**
+ * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_client_handle_valid(u32 clnt_hdl)
+{
+	return clnt_hdl < ipa3_ctx->ipa_num_pipes;
+}
+
+/**
+ * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_unvote(void)
+{
+	if (ipa3_ctx == NULL)
+		return;
+	mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
+	if (ipa3_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_cnt--;
+		if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0)
+			ipa3_ctx->q6_proxy_clk_vote_valid = false;
+	}
+	mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
+}
+
+/**
+ * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_vote(void)
+{
+	if (ipa3_ctx == NULL)
+		return;
+	mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
+	if (!ipa3_ctx->q6_proxy_clk_vote_valid ||
+		(ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_cnt++;
+		ipa3_ctx->q6_proxy_clk_vote_valid = true;
+	}
+	mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
+}
+
+/**
+ * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa3_get_smem_restr_bytes(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->smem_restricted_bytes;
+
+	IPAERR("IPA Driver not initialized\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa3_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->modem_cfg_emb_pipe_flt;
+
+	IPAERR("IPA driver has not been initialized\n");
+
+	return false;
+}
+
+/**
+ * ipa3_get_transport_type()
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa3_get_transport_type(void)
+{
+	return IPA_TRANSPORT_TYPE_GSI;
+}
+
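+/**
+ * ipa3_get_num_pipes() - read the number of pipes from HW
+ *
+ * Return value: the value of the IPA_ENABLED_PIPES register
+ */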
+u32 ipa3_get_num_pipes(void)
+{
+	return ipahal_read_reg(IPA_ENABLED_PIPES);
+}
+
+/**
+ * ipa3_disable_apps_wan_cons_deaggr()-
+ * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int res = -1;
+
+	/* ipahal will adjust limits based on HW capabilities */
+
+	if (ipa3_ctx) {
+		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+		return 0;
+	}
+	return res;
+}
+
+static void *ipa3_get_ipc_logbuf(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf;
+
+	return NULL;
+}
+
+static void *ipa3_get_ipc_logbuf_low(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf_low;
+
+	return NULL;
+}
+
+static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	*holb = ipa3_ctx->ep[ep_idx].holb;
+}
+
+static void ipa3_set_tag_process_before_gating(bool val)
+{
+	ipa3_ctx->tag_process_before_gating = val;
+}
+
+/**
+ * ipa3_is_vlan_mode - check if a LAN driver should load in VLAN mode
+ * @iface: type of vlan capable device
+ * @res: query result: true for vlan mode, false for non vlan mode
+ *
+ * API must be called after ipa_is_ready() returns true, otherwise it will fail
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	if (!res) {
+		IPAERR("NULL out param\n");
+		return -EINVAL;
+	}
+
+	if (iface < 0 || iface >= IPA_VLAN_IF_MAX) {
+		IPAERR("invalid iface %d\n", iface);
+		return -EINVAL;
+	}
+
+	if (!ipa3_is_ready()) {
+		IPAERR("IPA is not ready yet\n");
+		return -ENODEV;
+	}
+
+	*res = ipa3_ctx->vlan_mode_iface[iface];
+
+	IPADBG("Driver %d vlan mode is %d\n", iface, *res);
+	return 0;
+}
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl)
+{
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	api_ctrl->ipa_reset_endpoint = NULL;
+	api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+	api_ctrl->ipa_disable_endpoint = NULL;
+	api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
+	api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
+	api_ctrl->ipa_cfg_ep_conn_track = ipa3_cfg_ep_conn_track;
+	api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
+	api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
+	api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
+	api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
+	api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
+	api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
+	api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
+	api_ctrl->ipa_get_holb = ipa3_get_holb;
+	api_ctrl->ipa_set_tag_process_before_gating =
+			ipa3_set_tag_process_before_gating;
+	api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
+	api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
+	api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
+	api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
+	api_ctrl->ipa_add_hdr = ipa3_add_hdr;
+	api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr;
+	api_ctrl->ipa_del_hdr = ipa3_del_hdr;
+	api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
+	api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
+	api_ctrl->ipa_get_hdr = ipa3_get_hdr;
+	api_ctrl->ipa_put_hdr = ipa3_put_hdr;
+	api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
+	api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
+	api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
+	api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+	api_ctrl->ipa_add_rt_rule_v2 = ipa3_add_rt_rule_v2;
+	api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr;
+	api_ctrl->ipa_add_rt_rule_usr_v2 = ipa3_add_rt_rule_usr_v2;
+	api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
+	api_ctrl->ipa_commit_rt = ipa3_commit_rt;
+	api_ctrl->ipa_reset_rt = ipa3_reset_rt;
+	api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
+	api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
+	api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
+	api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
+	api_ctrl->ipa_mdfy_rt_rule_v2 = ipa3_mdfy_rt_rule_v2;
+	api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+	api_ctrl->ipa_add_flt_rule_v2 = ipa3_add_flt_rule_v2;
+	api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr;
+	api_ctrl->ipa_add_flt_rule_usr_v2 = ipa3_add_flt_rule_usr_v2;
+	api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule_v2 = ipa3_mdfy_flt_rule_v2;
+	api_ctrl->ipa_commit_flt = ipa3_commit_flt;
+	api_ctrl->ipa_reset_flt = ipa3_reset_flt;
+	api_ctrl->ipa_allocate_nat_device = ipa3_allocate_nat_device;
+	api_ctrl->ipa_allocate_nat_table = ipa3_allocate_nat_table;
+	api_ctrl->ipa_allocate_ipv6ct_table = ipa3_allocate_ipv6ct_table;
+	api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
+	api_ctrl->ipa_ipv6ct_init_cmd = ipa3_ipv6ct_init_cmd;
+	api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
+	api_ctrl->ipa_table_dma_cmd = ipa3_table_dma_cmd;
+	api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
+	api_ctrl->ipa_del_nat_table = ipa3_del_nat_table;
+	api_ctrl->ipa_del_ipv6ct_table = ipa3_del_ipv6ct_table;
+	api_ctrl->ipa_nat_mdfy_pdn = ipa3_nat_mdfy_pdn;
+	api_ctrl->ipa_send_msg = ipa3_send_msg;
+	api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
+	api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
+	api_ctrl->ipa_register_intf = ipa3_register_intf;
+	api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
+	api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
+	api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
+	api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
+	api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
+	api_ctrl->ipa_tx_dp = ipa3_tx_dp;
+	api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
+	api_ctrl->ipa_free_skb = ipa3_free_skb;
+	api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
+	api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
+	api_ctrl->ipa_sys_setup = ipa3_sys_setup;
+	api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
+	api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
+	api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
+	api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
+	api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
+	api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
+	api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
+	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
+	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+	api_ctrl->ipa_uc_bw_monitor = ipa3_uc_bw_monitor;
+	api_ctrl->ipa_set_wlan_tx_info = ipa3_set_wlan_tx_info;
+	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+			ipa3_broadcast_wdi_quota_reach_ind;
+	api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
+	api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
+	api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
+	api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
+	api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
+	api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
+	api_ctrl->ipa_set_client = ipa3_set_client;
+	api_ctrl->ipa_get_client = ipa3_get_client;
+	api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
+	api_ctrl->ipa_dma_init = ipa3_dma_init;
+	api_ctrl->ipa_dma_enable = ipa3_dma_enable;
+	api_ctrl->ipa_dma_disable = ipa3_dma_disable;
+	api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
+	api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
+	api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
+	api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
+	api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
+	api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
+	api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
+	api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
+	api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
+	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+			ipa3_qmi_enable_force_clear_datapath_send;
+	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+			ipa3_qmi_disable_force_clear_datapath_send;
+	api_ctrl->ipa_mhi_reset_channel_internal =
+			ipa3_mhi_reset_channel_internal;
+	api_ctrl->ipa_mhi_start_channel_internal =
+			ipa3_mhi_start_channel_internal;
+	api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
+	api_ctrl->ipa_mhi_resume_channels_internal =
+			ipa3_mhi_resume_channels_internal;
+	api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
+	api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
+	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+			ipa3_uc_mhi_send_dl_ul_sync_info;
+	api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
+	api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
+	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+			ipa3_uc_mhi_stop_event_update_channel;
+	api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
+	api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
+	api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
+	api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
+	api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
+	api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
+	api_ctrl->ipa_bam_reg_dump = NULL;
+	api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
+	api_ctrl->ipa_is_ready = ipa3_is_ready;
+	api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
+	api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
+	api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
+	api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
+	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+		ipa3_get_modem_cfg_emb_pipe_flt;
+	api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
+	api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
+	api_ctrl->ipa_ap_resume = ipa3_ap_resume;
+	api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
+	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+		ipa3_disable_apps_wan_cons_deaggr;
+	api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
+	api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
+	api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
+	api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
+	api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+	api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel;
+	api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
+	api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
+	api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
+	api_ctrl->ipa_inc_client_enable_clks_no_block =
+		ipa3_inc_client_enable_clks_no_block;
+	api_ctrl->ipa_suspend_resource_no_block =
+		ipa3_suspend_resource_no_block;
+	api_ctrl->ipa_resume_resource = ipa3_resume_resource;
+	api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
+	api_ctrl->ipa_set_required_perf_profile =
+		ipa3_set_required_perf_profile;
+	api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
+	api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+	api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+	api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+	api_ctrl->ipa_tear_down_uc_offload_pipes =
+		ipa3_tear_down_uc_offload_pipes;
+	api_ctrl->ipa_get_pdev = ipa3_get_pdev;
+	api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB;
+	api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB;
+	api_ctrl->ipa_conn_wdi_pipes = ipa3_conn_wdi3_pipes;
+	api_ctrl->ipa_disconn_wdi_pipes = ipa3_disconn_wdi3_pipes;
+	api_ctrl->ipa_enable_wdi_pipes = ipa3_enable_wdi3_pipes;
+	api_ctrl->ipa_disable_wdi_pipes = ipa3_disable_wdi3_pipes;
+	api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
+	api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
+	api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode;
+	api_ctrl->ipa_wigig_internal_init = ipa3_wigig_internal_init;
+	api_ctrl->ipa_conn_wigig_rx_pipe_i = ipa3_conn_wigig_rx_pipe_i;
+	api_ctrl->ipa_conn_wigig_client_i = ipa3_conn_wigig_client_i;
+	api_ctrl->ipa_disconn_wigig_pipe_i = ipa3_disconn_wigig_pipe_i;
+	api_ctrl->ipa_wigig_uc_msi_init = ipa3_wigig_uc_msi_init;
+	api_ctrl->ipa_enable_wigig_pipe_i = ipa3_enable_wigig_pipe_i;
+	api_ctrl->ipa_disable_wigig_pipe_i = ipa3_disable_wigig_pipe_i;
+	api_ctrl->ipa_register_client_callback =
+		ipa3_register_client_callback;
+	api_ctrl->ipa_deregister_client_callback =
+		ipa3_deregister_client_callback;
+	api_ctrl->ipa_get_lan_rx_napi = ipa3_get_lan_rx_napi;
+	api_ctrl->ipa_uc_debug_stats_alloc =
+		ipa3_uc_debug_stats_alloc;
+	api_ctrl->ipa_uc_debug_stats_dealloc =
+		ipa3_uc_debug_stats_dealloc;
+	api_ctrl->ipa_get_gsi_stats =
+		ipa3_get_gsi_stats;
+	api_ctrl->ipa_get_prot_id =
+		ipa3_get_prot_id;
+	return 0;
+}
+
+/**
+ * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
+ *
+ * @pipe_idx: pipe number
+ * Return value: true if owned by modem, false otherwise
+ */
+bool ipa_is_modem_pipe(int pipe_idx)
+{
+	int client_idx;
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
+			!IPA_CLIENT_IS_Q6_PROD(client_idx))
+			continue;
+		if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
+			return true;
+	}
+
+	return false;
+}
+
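+/*
+ * Each {SRC,DST}_RSRC_GRP_xy_RSRC_TYPE_n register holds the limits of a
+ * pair of resource groups (01, 23, 45), so two consecutive group indexes
+ * map to the same register.
+ */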
+static void ipa3_write_rsrc_grp_type_reg(int group_index,
+			enum ipa_rsrc_grp_type_src n, bool src,
+			struct ipahal_reg_rsrc_grp_cfg *val)
+{
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP:
+			case IPA_v3_0_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP_GENERAL:
+			case IPA_v3_0_GROUP_Q6ZIP_ENGINE:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_3_5:
+	case IPA_3_5_MHI:
+	case IPA_3_5_1:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_5_GROUP_LWA_DL:
+			case IPA_v3_5_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_MHI_GROUP_DMA:
+			case IPA_v3_5_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_5_GROUP_LWA_DL:
+			case IPA_v3_5_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_MHI_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_4_0:
+	case IPA_4_0_MHI:
+	case IPA_4_1:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_0_GROUP_LWA_DL:
+			case IPA_v4_0_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_0_MHI_GROUP_DMA:
+			case IPA_v4_0_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_0_GROUP_LWA_DL:
+			case IPA_v4_0_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_0_MHI_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_4_2:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_2_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_2_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_4_5:
+	case IPA_4_5_MHI:
+	case IPA_4_5_APQ:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_5_MHI_GROUP_PCIE:
+			case IPA_v4_5_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_5_MHI_GROUP_DMA:
+			case IPA_v4_5_MHI_GROUP_QDSS:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_5_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_5_MHI_GROUP_PCIE:
+			case IPA_v4_5_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_5_MHI_GROUP_DMA:
+			case IPA_v4_5_MHI_GROUP_QDSS:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_5_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_4_7:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_7_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_7_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_4_9:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_9_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_9_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+
+	default:
+		IPAERR("invalid hw type\n");
+		WARN_ON(1);
+		return;
+	}
+}
+
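+/*
+ * ipa3_configure_rx_hps_clients() - write the RX_HPS CMDQ client limits.
+ * @depth selects the DEPTH_0/DEPTH_1 register and @min selects the
+ * MIN/MAX variant.
+ */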
+static void ipa3_configure_rx_hps_clients(int depth,
+	int max_clnt_in_depth, int base_index, bool min)
+{
+	int i;
+	struct ipahal_reg_rx_hps_clients val;
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	for (i = 0 ; i < max_clnt_in_depth ; i++) {
+		if (min)
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[i + base_index].min;
+		else
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[i + base_index].max;
+	}
+	if (depth) {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+					&val);
+	} else {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+					&val);
+	}
+}
+
+static void ipa3_configure_rx_hps_weight(void)
+{
+	struct ipahal_reg_rx_hps_weights val;
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	val.hps_queue_weight_0 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[0];
+	val.hps_queue_weight_1 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[1];
+	val.hps_queue_weight_2 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[2];
+	val.hps_queue_weight_3 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[3];
+
+	ipahal_write_reg_fields(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, &val);
+}
+
+static void ipa3_configure_rx_hps(void)
+{
+	int rx_hps_max_clnt_in_depth0;
+
+	IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
+
+	/* Starting with IPA 4.5 there are 5 RX_HPS_CMDQs */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5)
+		rx_hps_max_clnt_in_depth0 = 4;
+	else
+		rx_hps_max_clnt_in_depth0 = 5;
+
+	ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, true);
+	ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, false);
+
+	/*
+	 * IPA 3.0/3.1 uses 6 RX_HPS_CMDQs and needs depth 1 for that,
+	 * which holds the two additional clients
+	 */
+	if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) {
+		ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0,
+			true);
+		ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0,
+			false);
+	}
+
+	/* Starting with IPA 4.2 there is no support for HPS weight config */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 &&
+		(ipa3_ctx->ipa_hw_type < IPA_HW_v4_2))
+		ipa3_configure_rx_hps_weight();
+}
+
+void ipa3_set_resorce_groups_min_max_limits(void)
+{
+	int i;
+	int j;
+	int src_rsrc_type_max;
+	int dst_rsrc_type_max;
+	int src_grp_idx_max;
+	int dst_grp_idx_max;
+	struct ipahal_reg_rsrc_grp_cfg val;
+	u8 hw_type_idx;
+
+	IPADBG("ENTER\n");
+
+	hw_type_idx = ipa3_get_hw_type_index();
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		src_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		break;
+	case IPA_3_5:
+	case IPA_3_5_MHI:
+	case IPA_3_5_1:
+		src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX;
+		break;
+	case IPA_4_0:
+	case IPA_4_0_MHI:
+	case IPA_4_1:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX;
+		break;
+	case IPA_4_2:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_2_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_2_DST_GROUP_MAX;
+		break;
+	case IPA_4_5:
+	case IPA_4_5_MHI:
+	case IPA_4_5_APQ:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_5_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_5_DST_GROUP_MAX;
+		break;
+	case IPA_4_7:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_7_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_7_DST_GROUP_MAX;
+		break;
+	case IPA_4_9:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_9_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_9_DST_GROUP_MAX;
+		break;
+	default:
+		IPAERR("invalid hw type index\n");
+		WARN_ON(1);
+		return;
+	}
+
+	IPADBG("Assign source rsrc groups min-max limits\n");
+	for (i = 0; i < src_rsrc_type_max; i++) {
+		for (j = 0; j < src_grp_idx_max; j = j + 2) {
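+			/* each register carries a pair of groups: x=j, y=j+1 */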
+			val.x_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
+		}
+	}
+
+	IPADBG("Assign destination rsrc groups min-max limits\n");
+	for (i = 0; i < dst_rsrc_type_max; i++) {
+		for (j = 0; j < dst_grp_idx_max; j = j + 2) {
+			val.x_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
+		}
+	}
+
+	/* rx_hps resource group configuration moved from HLOS to TZ on
+	 * real platforms with IPA 3.1 or later, so only configure it here
+	 * for older HW or for virtual/emulation modes
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_1 ||
+		ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
+		ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
+		ipa3_configure_rx_hps();
+	}
+
+	IPADBG("EXIT\n");
+}
+
+static bool ipa3_gsi_channel_is_quite(struct ipa3_ep_context *ep)
+{
+	bool empty;
+
+	gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
+	if (!empty) {
+		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
+		/* queue work to start polling if we don't have one */
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&ep->sys->curr_polling_state))
+			__ipa_gsi_irq_rx_scedule_poll(ep->sys);
+	}
+	return empty;
+}
+
+static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa_mem_buffer mem;
+	int res = 0;
+	int i;
+	struct ipa3_ep_context *ep;
+	enum ipa_client_type client_type;
+	struct IpaHwOffloadStatsAllocCmdData_t *gsi_info;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+	memset(&mem, 0, sizeof(mem));
+
+	/* stop uC gsi dbg stats monitor */
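+	/* a ch_id of 0xff marks the stats slot as unused */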
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
+		switch (client_type) {
+		case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[0].ch_id = 0xff;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[1].ch_id = 0xff;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[2].ch_id = 0xff;
+			gsi_info->ch_id_info[2].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_MHIP];
+			gsi_info->ch_id_info[3].ch_id = 0xff;
+			gsi_info->ch_id_info[3].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_PROD:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[0].ch_id = 0xff;
+			gsi_info->ch_id_info[0].dir = DIR_PRODUCER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		case IPA_CLIENT_USB_CONS:
+			gsi_info = &ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_USB];
+			gsi_info->ch_id_info[1].ch_id = 0xff;
+			gsi_info->ch_id_info[1].dir = DIR_CONSUMER;
+			ipa3_uc_debug_stats_alloc(*gsi_info);
+			break;
+		default:
+			IPADBG("client_type %d not supported\n",
+				client_type);
+		}
+	}
+
+	/*
+	 * Apply the GSI stop retry logic if GSI returns err code to retry.
+	 * Apply the retry logic for ipa_client_prod as well as ipa_client_cons.
+	 */
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
+		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
+			return res;
+		/*
+		 * From IPA 4.0 onwards there is no need to send the DMA
+		 * command; this issue was fixed in the newer HW versions.
+		 */
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+			IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+			/* Send a 1B packet DMA_TASK to IPA and try again */
+			res = ipa3_inject_dma_task_for_gsi();
+			if (res) {
+				IPAERR("Failed to inject DMA TASk for GSI\n");
+				return res;
+			}
+		}
+		/* sleep for short period to flush IPA */
+		usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPAERR("Failed  to stop GSI channel with retries\n");
+	return -EFAULT;
+}
+
+/**
+ * ipa3_stop_gsi_channel() - Stops a GSI channel in IPA
+ * @clnt_hdl: IPA client handle for the pipe whose GSI channel is stopped
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. This function returns when the channel is in STOP state.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	int res;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	res = __ipa3_stop_gsi_channel(clnt_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+EXPORT_SYMBOL(ipa3_stop_gsi_channel);
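+
+/*
+ * Illustrative usage sketch (an assumption added for clarity, not part of
+ * this snapshot): a client tearing down its pipe only needs to call
+ * ipa3_stop_gsi_channel() and check the final result; the stop-retry loop
+ * and the pre-IPAv4.0 DMA_TASK workaround are handled internally.
+ */
+#if 0
+static int example_client_stop(u32 clnt_hdl)
+{
+	int ret;
+
+	ret = ipa3_stop_gsi_channel(clnt_hdl);
+	if (ret) {
+		IPAERR("stop of client %u failed: %d\n", clnt_hdl, ret);
+		return ret;
+	}
+	/* the channel is now in STOP state; it is safe to reset/release */
+	return 0;
+}
+#endif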
+
+static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
+{
+	int ipa_ep_idx, coal_ep_idx;
+	struct ipa3_ep_context *ep;
+	int res;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR("not supported\n");
+		return -EPERM;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx < 0) {
+		IPADBG("client %d not configured\n", client);
+		return 0;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid)
+		return 0;
+
+	coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+
+	IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx);
+
+	/*
+	 * Configure the callback mode only once, after starting the channel;
+	 * otherwise an IEOB interrupt may arrive before the mode is
+	 * configured the second time, causing a race condition when updating
+	 * the current polling state.
+	 */
+
+	if (suspend) {
+		res = __ipa3_stop_gsi_channel(ipa_ep_idx);
+		if (res) {
+			IPAERR("failed to stop channel of pipe %d\n",
+				ipa_ep_idx);
+			ipa_assert();
+		}
+	} else {
+		res = gsi_start_channel(ep->gsi_chan_hdl);
+		if (res) {
+			IPAERR("failed to start channel of pipe %d\n",
+				ipa_ep_idx);
+			ipa_assert();
+		}
+	}
+
+	/* Apps prod pipes use a common event ring, so the mode cannot be
+	 * configured for them.
+	 */
+
+	/*
+	 * Also skip configuring the mode for the default WAN pipe, as both
+	 * WAN pipes share a common event ring. If both pipes configured the
+	 * same event ring, a race condition would be observed when updating
+	 * the current polling state.
+	 */
+
+	if (IPA_CLIENT_IS_APPS_PROD(client) ||
+		(client == IPA_CLIENT_APPS_WAN_CONS &&
+			coal_ep_idx != IPA_EP_NOT_ALLOCATED))
+		return 0;
+
+	if (suspend) {
+		IPADBG("switch ch %lu to poll\n", ep->gsi_chan_hdl);
+		gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+		if (!ipa3_gsi_channel_is_quiet(ep))
+			return -EAGAIN;
+	} else if (!atomic_read(&ep->sys->curr_polling_state)) {
+		IPADBG("switch ch %lu to callback\n", ep->gsi_chan_hdl);
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+	}
+
+	return 0;
+}
+
+void ipa3_force_close_coal(void)
+{
+	struct ipa3_desc desc;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
+		return;
+
+	ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->coal_cmd_pyld);
+
+	IPADBG("Sending 1 descriptor for coal force close\n");
+	if (ipa3_send_cmd(1, &desc))
+		IPADBG("ipa3_send_cmd timedout\n");
+}
+
+int ipa3_suspend_apps_pipes(bool suspend)
+{
+	int res;
+
+	/* As per the HPG, the coalescing channel must be started/stopped
+	 * first and only then the default one. The coalescing client's pipe
+	 * number is greater than the default one's, so the later client is
+	 * handled first.
+	 */
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_coal_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_cons;
+
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
+	if (res == -EAGAIN)
+		goto undo_odl_cons;
+
+	if (suspend) {
+		struct ipahal_reg_tx_wrapper tx;
+		int ep_idx;
+
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		if (ep_idx == IPA_EP_NOT_ALLOCATED ||
+				(!ipa3_ctx->ep[ep_idx].valid))
+			goto do_prod;
+
+		ipahal_read_reg_fields(IPA_STATE_TX_WRAPPER, &tx);
+		if (tx.coal_slave_open_frame != 0) {
+			IPADBG("COAL frame is open 0x%x\n",
+				tx.coal_slave_open_frame);
+			res = -EAGAIN;
+			goto undo_odl_cons;
+		}
+
+		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+		res = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
+			ipa3_ctx->ee);
+		if (res) {
+			IPADBG("suspend irq is pending 0x%x\n", res);
+			goto undo_odl_cons;
+		}
+	}
+do_prod:
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_lan_prod;
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, suspend);
+	if (res == -EAGAIN)
+		goto undo_wan_prod;
+
+	return 0;
+
+undo_wan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, !suspend);
+
+undo_lan_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, !suspend);
+
+undo_odl_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
+undo_lan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
+undo_wan_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
+	return res;
+
+undo_coal_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
+
+	return res;
+}
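+
+/*
+ * Illustrative sketch (hypothetical power-management caller, not part of
+ * this snapshot): ipa3_suspend_apps_pipes() already unwinds any partially
+ * suspended pipes when it returns -EAGAIN, so the caller can simply abort
+ * the suspend and retry later.
+ */
+#if 0
+static int example_pm_cycle(void)
+{
+	int ret;
+
+	ret = ipa3_suspend_apps_pipes(true);	/* suspend */
+	if (ret)
+		return ret;	/* pipes were already restored on failure */
+
+	/* ... system stays suspended here ... */
+
+	return ipa3_suspend_apps_pipes(false);	/* resume */
+}
+#endif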
+
+int ipa3_allocate_dma_task_for_gsi(void)
+{
+	struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 };
+
+	IPADBG("Allocate mem\n");
+	ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
+	ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		ipa3_ctx->dma_task_info.mem.size,
+		&ipa3_ctx->dma_task_info.mem.phys_base,
+		GFP_KERNEL);
+	if (!ipa3_ctx->dma_task_info.mem.base) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	cmd.flsh = true;
+	cmd.size1 = ipa3_ctx->dma_task_info.mem.size;
+	cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base;
+	cmd.packet_size = ipa3_ctx->dma_task_info.mem.size;
+	ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+	if (!ipa3_ctx->dma_task_info.cmd_pyld) {
+		IPAERR("failed to construct dma_task_32b_addr cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			ipa3_ctx->dma_task_info.mem.size,
+			ipa3_ctx->dma_task_info.mem.base,
+			ipa3_ctx->dma_task_info.mem.phys_base);
+		memset(&ipa3_ctx->dma_task_info, 0,
+			sizeof(ipa3_ctx->dma_task_info));
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void ipa3_free_dma_task_for_gsi(void)
+{
+	dma_free_coherent(ipa3_ctx->pdev,
+		ipa3_ctx->dma_task_info.mem.size,
+		ipa3_ctx->dma_task_info.mem.base,
+		ipa3_ctx->dma_task_info.mem.phys_base);
+	ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld);
+	memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
+}
+
+int ipa3_allocate_coal_close_frame(void)
+{
+	struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
+	struct ipahal_reg_valmask valmask;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED)
+		return 0;
+	IPADBG("Allocate coal close frame cmd\n");
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+	ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	ipa3_ctx->coal_cmd_pyld =
+		ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_cmd, false);
+	if (!ipa3_ctx->coal_cmd_pyld) {
+		IPAERR("fail to construct register_write imm cmd\n");
+		ipa_assert();
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ipa3_free_coal_close_frame(void)
+{
+	if (ipa3_ctx->coal_cmd_pyld)
+		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld);
+}
+
+/**
+ * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
+ *
+ * Send a DMA_TASK of 1B to IPA to unblock GSI channel in STOP_IN_PROG.
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_inject_dma_task_for_gsi(void)
+{
+	struct ipa3_desc desc;
+
+	ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->dma_task_info.cmd_pyld);
+
+	IPADBG("sending 1B packet to IPA\n");
+	if (ipa3_send_cmd_timeout(1, &desc,
+		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+		IPAERR("ipa3_send_cmd failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
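+
+/*
+ * Illustrative lifecycle sketch (assumed flow, not part of this snapshot):
+ * the DMA_TASK resources are allocated once at driver init, injected on
+ * demand from the channel stop-retry loop, and freed at teardown.
+ */
+#if 0
+static int example_dma_task_lifecycle(void)
+{
+	int ret;
+
+	ret = ipa3_allocate_dma_task_for_gsi();	/* at probe/init */
+	if (ret)
+		return ret;
+
+	ret = ipa3_inject_dma_task_for_gsi();	/* while stopping a channel */
+
+	ipa3_free_dma_task_for_gsi();		/* at remove/teardown */
+	return ret;
+}
+#endif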
+
+static int ipa3_load_single_fw(const struct firmware *firmware,
+	const struct elf32_phdr *phdr)
+{
+	u32 __iomem *fw_mem_base;
+	int index;
+	const uint32_t *elf_data_ptr;
+
+	if (phdr->p_offset > firmware->size) {
+		IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+			phdr->p_offset, firmware->size);
+		return -EINVAL;
+	}
+	if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+		IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+			phdr->p_offset, phdr->p_filesz, firmware->size);
+		return -EINVAL;
+	}
+
+	if (phdr->p_memsz % sizeof(uint32_t)) {
+		IPAERR("FW mem size %u doesn't align to 32bit\n",
+			phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		IPAERR("FW image too big src_size=%u dst_size=%u\n",
+			phdr->p_filesz, phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
+	if (!fw_mem_base) {
+		IPAERR("Failed to map 0x%x for the size of %u\n",
+			phdr->p_vaddr, phdr->p_memsz);
+		return -ENOMEM;
+	}
+
+	/* Set the entire region to 0s */
+	memset_io(fw_mem_base, 0, phdr->p_memsz);
+
+	elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+	/* Write the FW */
+	for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+		writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
+		elf_data_ptr++;
+	}
+
+	iounmap(fw_mem_base);
+
+	return 0;
+}
+
+struct ipa3_hps_dps_areas_info {
+	u32 dps_abs_addr;
+	u32 dps_sz;
+	u32 hps_abs_addr;
+	u32 hps_sz;
+};
+
+static void ipa3_get_hps_dps_areas_absolute_addr_and_sz(
+	struct ipa3_hps_dps_areas_info *info)
+{
+	u32 dps_area_start;
+	u32 dps_area_end;
+	u32 hps_area_start;
+	u32 hps_area_end;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		dps_area_start = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
+		dps_area_end = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_LAST);
+		hps_area_start = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
+		hps_area_end = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_LAST);
+
+		info->dps_abs_addr = ipa3_ctx->ipa_wrapper_base +
+			ipahal_get_reg_base() + dps_area_start;
+		info->hps_abs_addr = ipa3_ctx->ipa_wrapper_base +
+			ipahal_get_reg_base() + hps_area_start;
+	} else {
+		dps_area_start = ipahal_read_reg(IPA_DPS_SEQUENCER_FIRST);
+		dps_area_end = ipahal_read_reg(IPA_DPS_SEQUENCER_LAST);
+		hps_area_start = ipahal_read_reg(IPA_HPS_SEQUENCER_FIRST);
+		hps_area_end = ipahal_read_reg(IPA_HPS_SEQUENCER_LAST);
+
+		info->dps_abs_addr = ipa3_ctx->ipa_wrapper_base +
+			dps_area_start;
+		info->hps_abs_addr = ipa3_ctx->ipa_wrapper_base +
+			hps_area_start;
+	}
+
+	info->dps_sz = dps_area_end - dps_area_start + sizeof(u32);
+	info->hps_sz = hps_area_end - hps_area_start + sizeof(u32);
+
+	IPADBG("dps area: start offset=0x%x end offset=0x%x\n",
+		dps_area_start, dps_area_end);
+	IPADBG("hps area: start offset=0x%x end offset=0x%x\n",
+		hps_area_start, hps_area_end);
+}
+
+/**
+ * emulator_load_single_fw() - load firmware into emulator's memory
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @phdr: ELF program header
+ * @loc_to_map: physical location to map into virtual space
+ * @size_to_map: the size of memory to map into virtual space
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+static int emulator_load_single_fw(
+	const struct firmware   *firmware,
+	const struct elf32_phdr *phdr,
+	u32                      loc_to_map,
+	u32                      size_to_map)
+{
+	int index;
+	uint32_t ofb;
+	const uint32_t *elf_data_ptr;
+	void __iomem *fw_base;
+
+	IPADBG("firmware(%pK) phdr(%pK) loc_to_map(0x%X) size_to_map(%u)\n",
+	       firmware, phdr, loc_to_map, size_to_map);
+
+	if (phdr->p_offset > firmware->size) {
+		IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+			phdr->p_offset, firmware->size);
+		return -EINVAL;
+	}
+	if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+		IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+			phdr->p_offset, phdr->p_filesz, firmware->size);
+		return -EINVAL;
+	}
+
+	if (phdr->p_memsz % sizeof(uint32_t)) {
+		IPAERR("FW mem size %u doesn't align to 32bit\n",
+			phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		IPAERR("FW image too big src_size=%u dst_size=%u\n",
+			phdr->p_filesz, phdr->p_memsz);
+		return -EFAULT;
+	}
+
+	IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n",
+	       (uint32_t) phdr->p_memsz,
+	       (uint32_t) phdr->p_filesz,
+	       (uint32_t) (phdr->p_filesz/sizeof(uint32_t)));
+
+	fw_base = ioremap(loc_to_map, size_to_map);
+	if (!fw_base) {
+		IPAERR("Failed to map 0x%X for the size of %u\n",
+		       loc_to_map, size_to_map);
+		return -ENOMEM;
+	}
+
+	IPADBG("Physical base(0x%X) mapped to virtual (%pK) with len (%u)\n",
+	       loc_to_map,
+	       fw_base,
+	       size_to_map);
+
+	/* Set the entire region to 0s */
+	ofb = 0;
+	for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) {
+		writel_relaxed(0, fw_base + ofb);
+		ofb += sizeof(uint32_t);
+	}
+
+	elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+	/* Write the FW */
+	ofb = 0;
+	for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+		writel_relaxed(*elf_data_ptr, fw_base + ofb);
+		elf_data_ptr++;
+		ofb += sizeof(uint32_t);
+	}
+
+	iounmap(fw_base);
+
+	return 0;
+}
+
+/**
+ * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @gsi_mem_base: GSI base address
+ * @gsi_ver: GSI Version
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base,
+	enum gsi_ver gsi_ver)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	unsigned long gsi_iram_ofst;
+	unsigned long gsi_iram_size;
+	int rc;
+	struct ipa3_hps_dps_areas_info dps_hps_info;
+
+	if (gsi_ver == GSI_VER_ERR) {
+		IPAERR("Invalid GSI Version\n");
+		return -EINVAL;
+	}
+
+	if (!gsi_mem_base) {
+		IPAERR("Invalid GSI base address\n");
+		return -EINVAL;
+	}
+
+	ipa_assert_on(!firmware);
+	/* One program header per FW image: GSI, DPS and HPS */
+	if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+		IPAERR("Missing ELF and Program headers firmware size=%zu\n",
+			firmware->size);
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+	ipa_assert_on(!ehdr);
+	if (ehdr->e_phnum != 3) {
+		IPAERR("Unexpected number of ELF program headers\n");
+		return -EINVAL;
+	}
+	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
+
+	/*
+	 * Each ELF program header represents a FW image and contains:
+	 *  p_vaddr : The starting address to which the FW needs to be loaded
+	 *  p_memsz : The size of the IRAM (where the image is loaded)
+	 *  p_filesz: The size of the FW image embedded inside the ELF
+	 *  p_offset: Absolute offset to the image from the head of the ELF
+	 */
+
+	/* Load GSI FW image */
+	gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size,
+		gsi_ver);
+	if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) {
+		IPAERR(
+			"Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n"
+			, phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst);
+		return -EINVAL;
+	}
+	if (phdr->p_memsz > gsi_iram_size) {
+		IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n",
+			phdr->p_memsz, gsi_iram_size);
+		return -EINVAL;
+	}
+	rc = ipa3_load_single_fw(firmware, phdr);
+	if (rc)
+		return rc;
+
+	phdr++;
+	ipa3_get_hps_dps_areas_absolute_addr_and_sz(&dps_hps_info);
+
+	/* Load IPA DPS FW image */
+	if (phdr->p_vaddr != dps_hps_info.dps_abs_addr) {
+		IPAERR(
+			"Invalid IPA DPS img load addr vaddr=0x%x dps_abs_addr=0x%x\n"
+			, phdr->p_vaddr, dps_hps_info.dps_abs_addr);
+		return -EINVAL;
+	}
+	if (phdr->p_memsz > dps_hps_info.dps_sz) {
+		IPAERR("Invalid IPA DPS img size memsz=%d dps_area_size=%u\n",
+			phdr->p_memsz, dps_hps_info.dps_sz);
+		return -EINVAL;
+	}
+	rc = ipa3_load_single_fw(firmware, phdr);
+	if (rc)
+		return rc;
+
+	phdr++;
+
+	/* Load IPA HPS FW image */
+	if (phdr->p_vaddr != dps_hps_info.hps_abs_addr) {
+		IPAERR(
+			"Invalid IPA HPS img load addr vaddr=0x%x hps_abs_addr=0x%x\n"
+			, phdr->p_vaddr, dps_hps_info.hps_abs_addr);
+		return -EINVAL;
+	}
+	if (phdr->p_memsz > dps_hps_info.hps_sz) {
+		IPAERR("Invalid IPA HPS img size memsz=%d hps_area_size=%u\n",
+			phdr->p_memsz, dps_hps_info.hps_sz);
+		return -EINVAL;
+	}
+	rc = ipa3_load_single_fw(firmware, phdr);
+	if (rc)
+		return rc;
+
+	IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
+	return 0;
+}
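+
+/*
+ * Illustrative sketch (the firmware name and GSI version below are
+ * hypothetical; real values come from the platform configuration): the FW
+ * ELF is obtained via the kernel firmware API and handed to ipa3_load_fws().
+ */
+#if 0
+#include <linux/firmware.h>
+
+static int example_load_ipa_fws(struct device *dev, phys_addr_t gsi_mem_base)
+{
+	const struct firmware *fw;
+	int ret;
+
+	ret = request_firmware(&fw, "ipa_fws.elf", dev);
+	if (ret)
+		return ret;
+
+	ret = ipa3_load_fws(fw, gsi_mem_base, GSI_VER_2_5);
+	release_firmware(fw);
+	return ret;
+}
+#endif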
+
+/*
+ * The following is needed for the EMULATION system. On a non-emulation
+ * system (i.e. the real UE), this functionality is done in
+ * TZ...
+ */
+
+static void ipa_gsi_setup_reg(void)
+{
+	u32 reg_val, start;
+	int i;
+	const struct ipa_gsi_ep_config *gsi_ep_info_cfg;
+	enum ipa_client_type type;
+
+	IPADBG("Setting up registers in preparation for firmware download\n");
+
+	/* setup IPA_ENDP_GSI_CFG_TLV_n reg */
+	start = 0;
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		IPAERR("for ep %d client is %d gsi_ep_info_cfg=%pK\n",
+			i, type, gsi_ep_info_cfg);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = ((gsi_ep_info_cfg->ipa_if_tlv << 16) & 0x00FF0000);
+		reg_val += (start & 0xFFFF);
+		start += gsi_ep_info_cfg->ipa_if_tlv;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_n, i, reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG_AOS_n reg */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = ((gsi_ep_info_cfg->ipa_if_aos << 16) & 0x00FF0000);
+		reg_val += (start & 0xFFFF);
+		start += gsi_ep_info_cfg->ipa_if_aos;
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_n, i, reg_val);
+	}
+
+	/* setup GSI_MAP_EE_n_CH_k_VP_TABLE reg */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = i & 0x1F;
+		gsi_map_virtual_ch_to_per_ep(
+			gsi_ep_info_cfg->ee,
+			gsi_ep_info_cfg->ipa_gsi_chan_num,
+			reg_val);
+	}
+
+	/* setup IPA_ENDP_GSI_CFG1_n reg */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		type = ipa3_get_client_by_pipe(i);
+		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		if (!gsi_ep_info_cfg)
+			continue;
+		reg_val = (1U << 31) | (1 << 16);
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, 1 << 16);
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, reg_val);
+		ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, 1 << 16);
+	}
+}
+
+/**
+ * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ * @transport_mem_base: Where to load
+ * @transport_mem_size: Space available to load into
+ * @gsi_ver: GSI version
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int emulator_load_fws(
+	const struct firmware *firmware,
+	u32 transport_mem_base,
+	u32 transport_mem_size,
+	enum gsi_ver gsi_ver)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	unsigned long gsi_offset, gsi_ram_size;
+	struct ipa3_hps_dps_areas_info dps_hps_info;
+	int rc;
+
+	IPADBG("Loading firmware(%pK)\n", firmware);
+
+	if (!firmware) {
+		IPAERR("firmware pointer passed to function is NULL\n");
+		return -EINVAL;
+	}
+
+	/* One program header per FW image: GSI, DPS and HPS */
+	if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+		IPAERR(
+		    "Missing ELF and Program headers firmware size=%zu\n",
+		    firmware->size);
+		return -EINVAL;
+	}
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	ipa_assert_on(!ehdr);
+
+	if (ehdr->e_phnum != 3) {
+		IPAERR("Unexpected number of ELF program headers\n");
+		return -EINVAL;
+	}
+
+	ipa3_get_hps_dps_areas_absolute_addr_and_sz(&dps_hps_info);
+
+	/*
+	 * Each ELF program header represents a FW image and contains:
+	 *  p_vaddr : The starting address to which the FW needs to be loaded
+	 *  p_memsz : The size of the IRAM (where the image is loaded)
+	 *  p_filesz: The size of the FW image embedded inside the ELF
+	 *  p_offset: Absolute offset to the image from the head of the ELF
+	 *
+	 * NOTE WELL: On the emulation platform, the p_vaddr address
+	 *            is not relevant and is unused.  This is because
+	 *            on the emulation platform, the registers'
+	 *            address location is mutable, since it's mapped
+	 *            in via a PCIe probe.  Given this, it is the
+	 *            mapped address info that's used while p_vaddr is
+	 *            ignored.
+	 */
+	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
+
+	phdr += 2;
+
+	/*
+	 * Attempt to load IPA HPS FW image
+	 */
+	if (phdr->p_memsz > dps_hps_info.hps_sz) {
+		IPAERR("Invalid IPA HPS img size memsz=%d hps_size=%u\n",
+		       phdr->p_memsz, dps_hps_info.hps_sz);
+		return -EINVAL;
+	}
+	IPADBG("Loading HPS FW\n");
+	rc = emulator_load_single_fw(
+		firmware, phdr,
+		dps_hps_info.hps_abs_addr, dps_hps_info.hps_sz);
+	if (rc)
+		return rc;
+	IPADBG("Loading HPS FW complete\n");
+
+	--phdr;
+
+	/*
+	 * Attempt to load IPA DPS FW image
+	 */
+	if (phdr->p_memsz > dps_hps_info.dps_sz) {
+		IPAERR("Invalid IPA DPS img size memsz=%d dps_size=%u\n",
+		       phdr->p_memsz, dps_hps_info.dps_sz);
+		return -EINVAL;
+	}
+	IPADBG("Loading DPS FW\n");
+	rc = emulator_load_single_fw(
+		firmware, phdr,
+		dps_hps_info.dps_abs_addr, dps_hps_info.dps_sz);
+	if (rc)
+		return rc;
+	IPADBG("Loading DPS FW complete\n");
+
+	/*
+	 * Run gsi register setup which is normally done in TZ on
+	 * non-EMULATION systems...
+	 */
+	ipa_gsi_setup_reg();
+
+	--phdr;
+
+	gsi_get_inst_ram_offset_and_size(&gsi_offset, &gsi_ram_size, gsi_ver);
+
+	/*
+	 * Attempt to load GSI FW image
+	 */
+	if (phdr->p_memsz > gsi_ram_size) {
+		IPAERR(
+		    "Invalid GSI FW img size memsz=%d gsi_ram_size=%lu\n",
+		    phdr->p_memsz, gsi_ram_size);
+		return -EINVAL;
+	}
+	IPADBG("Loading GSI FW\n");
+	rc = emulator_load_single_fw(
+		firmware, phdr,
+		transport_mem_base + (u32) gsi_offset, gsi_ram_size);
+	if (rc)
+		return rc;
+	IPADBG("Loading GSI FW complete\n");
+
+	IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_is_apq() - indicate whether this is an APQ platform
+ *
+ * Return value: true if APQ, false otherwise
+ *
+ */
+bool ipa3_is_apq(void)
+{
+	return ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ;
+}
+
+/**
+ * ipa_get_fnr_info() - get fnr_info
+ *
+ * Return value: true if set, false if not set
+ *
+ */
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info)
+{
+	bool res = false;
+
+	if (ipa3_ctx->fnr_info.valid) {
+		fnr_info->valid = ipa3_ctx->fnr_info.valid;
+		fnr_info->hw_counter_offset =
+			ipa3_ctx->fnr_info.hw_counter_offset;
+		fnr_info->sw_counter_offset =
+			ipa3_ctx->fnr_info.sw_counter_offset;
+		res = true;
+	} else {
+		IPAERR("fnr_info not valid!\n");
+		res = false;
+	}
+	return res;
+}
+
+/**
+ * ipa3_disable_prefetch() - disable TX prefetch
+ *
+ * @client: the client which is related to the TX where prefetch will be
+ *          disabled
+ *
+ * Return value: Non applicable
+ *
+ */
+void ipa3_disable_prefetch(enum ipa_client_type client)
+{
+	struct ipahal_reg_tx_cfg cfg;
+	u8 qmb;
+
+	qmb = ipa3_get_qmb_master_sel(client);
+
+	IPADBG("disabling prefetch for qmb %d\n", (int)qmb);
+
+	ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+	/* QMB0 (DDR) correlates with TX0, QMB1(PCIE) correlates with TX1 */
+	if (qmb == QMB_MASTER_SELECT_DDR)
+		cfg.tx0_prefetch_disable = true;
+	else
+		cfg.tx1_prefetch_disable = true;
+	ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+}
+
+/**
+ * ipa3_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa3_get_pdev(void)
+{
+	if (!ipa3_ctx)
+		return NULL;
+
+	return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_enable_dcd() - enable dynamic clock division on IPA
+ *
+ * Return value: Non applicable
+ *
+ */
+void ipa3_enable_dcd(void)
+{
+	struct ipahal_reg_idle_indication_cfg idle_indication_cfg;
+
+	/* recommended values for IPA 3.5 according to IPA HPG */
+	idle_indication_cfg.const_non_idle_enable = false;
+	idle_indication_cfg.enter_idle_debounce_thresh = 256;
+
+	ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG,
+			&idle_indication_cfg);
+}
+
+void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
+	struct ipahal_imm_cmd_pyld *cmd_pyld)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = cmd_pyld->opcode;
+	desc->pyld = cmd_pyld->data;
+	desc->len = cmd_pyld->len;
+	desc->type = IPA_IMM_CMD_DESC;
+}
+
+u32 ipa3_get_r_rev_version(void)
+{
+	static u32 r_rev;
+
+	if (r_rev != 0)
+		return r_rev;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	r_rev = ipahal_read_reg(IPA_VERSION);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return r_rev;
+}
+EXPORT_SYMBOL(ipa3_get_r_rev_version);
+
+/**
+ * ipa3_ctx_get_type() - to get platform type, hw type
+ * and hw mode
+ *
+ * Return value: enumerated types of platform and ipa hw
+ *
+ */
+int ipa3_ctx_get_type(enum ipa_type_mode type)
+{
+	switch (type) {
+	case IPA_HW_TYPE:
+		return ipa3_ctx->ipa_hw_type;
+	case PLATFORM_TYPE:
+		return ipa3_ctx->platform_type;
+	case IPA3_HW_MODE:
+		return ipa3_ctx->ipa3_hw_mode;
+	default:
+		IPAERR("cannot read ipa3_ctx types\n");
+		return 0;
+	}
+}
+
+/**
+ * ipa3_get_gsi_stats() - Query gsi stats from uc
+ * @prot_id: IPA_HW_FEATURE_OFFLOAD protocol id
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+void ipa3_get_gsi_stats(int prot_id,
+	struct ipa_uc_dbg_ring_stats *stats)
+{
+	switch (prot_id) {
+	case IPA_HW_PROTOCOL_AQC:
+		stats->num_ch = MAX_AQC_CHANNELS;
+		ipa3_get_aqc_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_11ad:
+		break;
+	case IPA_HW_PROTOCOL_WDI:
+		stats->num_ch = MAX_WDI2_CHANNELS;
+		ipa3_get_wdi_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_WDI3:
+		stats->num_ch = MAX_WDI3_CHANNELS;
+		ipa3_get_wdi3_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_ETH:
+		break;
+	case IPA_HW_PROTOCOL_MHIP:
+		stats->num_ch = MAX_MHIP_CHANNELS;
+		ipa3_get_mhip_gsi_stats(stats);
+		break;
+	case IPA_HW_PROTOCOL_USB:
+		stats->num_ch = MAX_USB_CHANNELS;
+		ipa3_get_usb_gsi_stats(stats);
+		break;
+	default:
+		IPAERR("unsupported HW feature %d\n", prot_id);
+	}
+}
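+
+/*
+ * Illustrative sketch (hypothetical debugfs-style caller, not part of this
+ * snapshot): querying the per-channel GSI ring stats for one protocol.
+ */
+#if 0
+static void example_dump_wdi3_stats(void)
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int i;
+
+	memset(&stats, 0, sizeof(stats));
+	ipa3_get_gsi_stats(IPA_HW_PROTOCOL_WDI3, &stats);
+	for (i = 0; i < stats.num_ch; i++)
+		IPADBG("ring %d: full=%u empty=%u\n", i,
+			stats.ring[i].ringFull, stats.ring[i].ringEmpty);
+}
+#endif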
+
+/**
+ * ipa3_ctx_get_flag() - to read some ipa3_ctx_flags
+ *
+ * Return value: true/false based on read value
+ *
+ */
+bool ipa3_ctx_get_flag(enum ipa_flag flag)
+{
+	switch (flag) {
+	case IPA_ENDP_DELAY_WA_EN:
+		return ipa3_ctx->ipa_endp_delay_wa;
+	case IPA_HW_STATS_EN:
+		return ipa3_ctx->hw_stats.enabled;
+	case IPA_MHI_EN:
+		return ipa3_ctx->ipa_config_is_mhi;
+	case IPA_FLTRT_NOT_HASHABLE_EN:
+		return ipa3_ctx->ipa_fltrt_not_hashable;
+	default:
+		IPAERR("cannot read ipa3_ctx flags\n");
+		return false;
+	}
+}
+
+/**
+ * ipa3_ctx_get_num_pipes() - to read pipe number from ipa3_ctx
+ *
+ * Return value: unsigned number
+ *
+ */
+u32 ipa3_ctx_get_num_pipes(void)
+{
+	return ipa3_ctx->ipa_num_pipes;
+}
+
+int ipa3_app_clk_vote(
+	enum ipa_app_clock_vote_type vote_type)
+{
+	const char *str_ptr = "APP_VOTE";
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	mutex_lock(&ipa3_ctx->app_clock_vote.mutex);
+
+	switch (vote_type) {
+	case IPA_APP_CLK_VOTE:
+		if ((ipa3_ctx->app_clock_vote.cnt + 1) <= IPA_APP_VOTE_MAX) {
+			ipa3_ctx->app_clock_vote.cnt++;
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL(str_ptr);
+		} else {
+			IPAERR_RL("App vote count max hit\n");
+			ret = -EPERM;
+			break;
+		}
+		break;
+	case IPA_APP_CLK_DEVOTE:
+		if (ipa3_ctx->app_clock_vote.cnt) {
+			ipa3_ctx->app_clock_vote.cnt--;
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(str_ptr);
+		}
+		break;
+	case IPA_APP_CLK_RESET_VOTE:
+		while (ipa3_ctx->app_clock_vote.cnt > 0) {
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(str_ptr);
+			ipa3_ctx->app_clock_vote.cnt--;
+		}
+		break;
+	default:
+		IPAERR_RL("Unknown vote_type(%u)\n", vote_type);
+		ret = -EPERM;
+		break;
+	}
+
+	mutex_unlock(&ipa3_ctx->app_clock_vote.mutex);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
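+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this snapshot):
+ * every successful IPA_APP_CLK_VOTE must be balanced by a matching
+ * IPA_APP_CLK_DEVOTE; IPA_APP_CLK_RESET_VOTE drops all remaining votes.
+ */
+#if 0
+static int example_work_with_clock_vote(void)
+{
+	int ret;
+
+	ret = ipa3_app_clk_vote(IPA_APP_CLK_VOTE);
+	if (ret)
+		return ret;
+
+	/* ... access IPA registers or issue commands here ... */
+
+	return ipa3_app_clk_vote(IPA_APP_CLK_DEVOTE);
+}
+#endif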
+
+/**
+ * ipa3_get_prot_id() - Query gsi protocol id
+ * @client: ipa_client_type
+ *
+ * Return the prot_id based on the client type;
+ * return -EINVAL when no such mapping exists.
+ */
+int ipa3_get_prot_id(enum ipa_client_type client)
+{
+	int prot_id = -EINVAL;
+
+	switch (client) {
+	case IPA_CLIENT_AQC_ETHERNET_CONS:
+	case IPA_CLIENT_AQC_ETHERNET_PROD:
+		prot_id = IPA_HW_PROTOCOL_AQC;
+		break;
+	case IPA_CLIENT_MHI_PRIME_TETH_PROD:
+	case IPA_CLIENT_MHI_PRIME_TETH_CONS:
+	case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
+	case IPA_CLIENT_MHI_PRIME_RMNET_CONS:
+		prot_id = IPA_HW_PROTOCOL_MHIP;
+		break;
+	case IPA_CLIENT_WLAN1_PROD:
+	case IPA_CLIENT_WLAN1_CONS:
+		prot_id = IPA_HW_PROTOCOL_WDI;
+		break;
+	case IPA_CLIENT_WLAN2_PROD:
+	case IPA_CLIENT_WLAN2_CONS:
+		prot_id = IPA_HW_PROTOCOL_WDI3;
+		break;
+	case IPA_CLIENT_USB_PROD:
+	case IPA_CLIENT_USB_CONS:
+		prot_id = IPA_HW_PROTOCOL_USB;
+		break;
+	case IPA_CLIENT_ETHERNET_PROD:
+	case IPA_CLIENT_ETHERNET_CONS:
+		prot_id = IPA_HW_PROTOCOL_ETH;
+		break;
+	case IPA_CLIENT_WIGIG_PROD:
+	case IPA_CLIENT_WIGIG1_CONS:
+	case IPA_CLIENT_WIGIG2_CONS:
+	case IPA_CLIENT_WIGIG3_CONS:
+	case IPA_CLIENT_WIGIG4_CONS:
+		prot_id = IPA_HW_PROTOCOL_11ad;
+		break;
+	default:
+		IPAERR("unknown prot_id for client %d\n",
+			client);
+	}
+
+	return prot_id;
+}

+ 986 - 0
ipa/ipa_v3/ipa_wdi3_i.c

@@ -0,0 +1,986 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/ipa_wdi3.h>
+
+#define UPDATE_RP_MODERATION_CONFIG 1
+#define UPDATE_RP_MODERATION_THRESHOLD 8
+
+#define IPA_WLAN_AGGR_PKT_LIMIT 1
+#define IPA_WLAN_AGGR_BYTE_LIMIT 2 /* 2 KB aggregation hard byte limit */
+
+#define IPA_WDI3_GSI_EVT_RING_INT_MODT 32
+
+static void ipa3_wdi3_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static void ipa3_wdi3_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
+	struct ipa_wdi_pipe_setup_info *info,
+	struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	union __packed gsi_evt_scratch evt_scratch;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	int result, len;
+	unsigned long va;
+	uint32_t addr_low, addr_high;
+
+	if (!info || !info_smmu || !ep) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+	/* setup event ring */
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_WDI3_EV;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_9) {
+		gsi_evt_ring_props.intr = GSI_INTR_MSI;
+		/* 32 (for Tx) and 8 (for Rx) */
+		if (dir == IPA_WDI3_TX_DIR)
+			gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_32B;
+		else
+			gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
+	} else {
+		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+		/* 16 (for Tx) and 8 (for Rx) */
+		if (dir == IPA_WDI3_TX_DIR)
+			gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+		else
+			gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
+	}
+	if (!is_smmu_enabled) {
+		gsi_evt_ring_props.ring_len = info->event_ring_size;
+		gsi_evt_ring_props.ring_base_addr =
+			(u64)info->event_ring_base_pa;
+	} else {
+		len = info_smmu->event_ring_size;
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
+				true, info->event_ring_base_pa,
+				&info_smmu->event_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				return -EFAULT;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_COMP_RING_RES, true,
+				info->event_ring_base_pa,
+				&info_smmu->event_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				return -EFAULT;
+			}
+		}
+		gsi_evt_ring_props.ring_len = len;
+		gsi_evt_ring_props.ring_base_addr = (u64)va;
+	}
+	gsi_evt_ring_props.int_modt = IPA_WDI3_GSI_EVT_RING_INT_MODT;
+	gsi_evt_ring_props.int_modc = 1;
+	gsi_evt_ring_props.exclusive = true;
+	gsi_evt_ring_props.err_cb = ipa3_wdi3_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("fail to alloc RX event ring\n");
+		result = -EFAULT;
+		goto fail_smmu_mapping;
+	}
+
+	ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		gsi_evt_ring_props.ring_base_addr;
+
+	/* setup channel ring */
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_WDI3;
+	if (dir == IPA_WDI3_TX_DIR)
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	else
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+		       ep->client);
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	}
+	gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	gsi_channel_props.db_in_bytes = 0;
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_9) {
+		/* 32 (for Tx) and 64 (for Rx) */
+		if (dir == IPA_WDI3_TX_DIR)
+			gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_32B;
+		else
+			gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_64B;
+	} else
+		gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		gsi_ep_info->prefetch_mode;
+	gsi_channel_props.empty_lvl_threshold =
+		gsi_ep_info->prefetch_threshold;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
+
+	if (!is_smmu_enabled) {
+		gsi_channel_props.ring_len = (u16)info->transfer_ring_size;
+		gsi_channel_props.ring_base_addr =
+			(u64)info->transfer_ring_base_pa;
+	} else {
+		len = info_smmu->transfer_ring_size;
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
+				true, info->transfer_ring_base_pa,
+				&info_smmu->transfer_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_get_gsi_ep_info;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_RING_RES, true,
+				info->transfer_ring_base_pa,
+				&info_smmu->transfer_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_get_gsi_ep_info;
+			}
+		}
+		gsi_channel_props.ring_len = len;
+		gsi_channel_props.ring_base_addr = (u64)va;
+	}
+
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_get_gsi_ep_info;
+
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+
+	/* write event scratch */
+	memset(&evt_scratch, 0, sizeof(evt_scratch));
+	evt_scratch.wdi3.update_rp_moderation_config =
+		UPDATE_RP_MODERATION_CONFIG;
+	result = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, evt_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write evt ring scratch\n");
+		goto fail_write_scratch;
+	}
+
+	if (!is_smmu_enabled) {
+		IPADBG("smmu disabled\n");
+		if (info->is_evt_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->event_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG("smmu enabled\n");
+		if (info_smmu->is_evt_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
+		addr_low = (u32)info->event_ring_doorbell_pa;
+		addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
+	} else {
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_COMP_RING_WP_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		}
+		addr_low = (u32)va;
+		addr_high = (u32)((u64)va >> 32);
+	}
+
+	/*
+	 * Arch specific:
+	 * PCIe addresses that do not go via SMMU use the PA directly.
+	 * PCIe and DDR go through two different ports, and bit 40 is
+	 * asserted to mark an address as PCIe:
+	 * WDI-3.0, MSM --> PCIe via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 *                  assert bit 40 in that case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Even though SMMU is enabled, don't use the IOVA address,
+		 * since PCIe is not routed via SMMU on MDMs
+		 */
+		if (info_smmu->is_evt_rn_db_pcie_addr == true) {
+			addr_low = (u32)info_smmu->event_ring_doorbell_pa;
+			addr_high =
+				(u32)((u64)info_smmu->event_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * Per the WDI-3.0 interface document, GSI recommends setting bit 40
+	 * for (MDM targets && PCIe addr); bit 8 of the high 32 bits
+	 * corresponds to bit 40 of the 64-bit address.
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	}
+
+	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
+			addr_low,
+			addr_high);
+
+	/* write channel scratch */
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.wdi3.update_rp_moderation_threshold =
+		UPDATE_RP_MODERATION_THRESHOLD;
+	if (dir == IPA_WDI3_RX_DIR) {
+		if (!is_smmu_enabled)
+			ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		else
+			ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
+		/* this metadata reg offset need to be in words */
+		ch_scratch.wdi3.endp_metadata_reg_offset =
+			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
+				gsi_ep_info->ipa_ep_num) / 4;
+	}
+
+	if (!is_smmu_enabled) {
+		IPADBG_LOW("smmu disabled\n");
+		if (info->is_txr_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->transfer_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG_LOW("smmu eabled\n");
+		if (info_smmu->is_txr_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
+		ch_scratch.wdi3.wifi_rp_address_low =
+			(u32)info->transfer_ring_doorbell_pa;
+		ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u64)info->transfer_ring_doorbell_pa >> 32);
+	} else {
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_DB_RES,
+				true, info_smmu->transfer_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+			ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)va >> 32);
+		} else {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+				true, info_smmu->transfer_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+			ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)va >> 32);
+		}
+	}
+
+	/*
+	 * Arch specific:
+	 * PCIe addresses that do not go via SMMU use the PA directly.
+	 * PCIe and DDR go through two different ports, and bit 40 is
+	 * asserted to mark an address as PCIe:
+	 * WDI-3.0, MSM --> PCIe via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 *                  assert bit 40 in that case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Even though SMMU is enabled, don't use the IOVA address,
+		 * since PCIe is not routed via SMMU on MDMs
+		 */
+		if (info_smmu->is_txr_rn_db_pcie_addr == true) {
+			ch_scratch.wdi3.wifi_rp_address_low =
+				(u32)info_smmu->transfer_ring_doorbell_pa;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)info_smmu->transfer_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * Per the WDI-3.0 interface document, GSI recommends setting bit 40
+	 * for (MDM targets && PCIe addr); bit 8 of the high 32 bits
+	 * corresponds to bit 40 of the 64-bit address.
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high |= (1 << 8);
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high |= (1 << 8);
+	}
+
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write evt ring scratch\n");
+		goto fail_write_scratch;
+	}
+	return 0;
+
+fail_write_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+	ep->gsi_chan_hdl = ~0;
+fail_get_gsi_ep_info:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+fail_smmu_mapping:
+	ipa3_release_wdi3_gsi_smmu_mappings(dir);
+	return result;
+}
+
+int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out,
+	ipa_wdi_meter_notifier_cb wdi_notify)
+{
+	enum ipa_client_type rx_client;
+	enum ipa_client_type tx_client;
+	struct ipa3_ep_context *ep_rx;
+	struct ipa3_ep_context *ep_tx;
+	int ipa_ep_idx_rx;
+	int ipa_ep_idx_tx;
+	int result = 0;
+	u32 gsi_db_addr_low, gsi_db_addr_high;
+	void __iomem *db_addr;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
+
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (in == NULL || out == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (in->is_smmu_enabled == false) {
+		rx_client = in->u_rx.rx.client;
+		tx_client = in->u_tx.tx.client;
+	} else {
+		rx_client = in->u_rx.rx_smmu.client;
+		tx_client = in->u_tx.tx_smmu.client;
+	}
+
+	ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
+	ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
+
+	if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+	if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("ep out of range.\n");
+		return -EFAULT;
+	}
+
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+
+	if (ep_rx->valid || ep_tx->valid) {
+		IPAERR("EP already allocated.\n");
+		return -EFAULT;
+	}
+
+	memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	if (wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
+	else
+		IPADBG("wdi_notify is null\n");
+#endif
+
+	/* setup rx ep cfg */
+	ep_rx->valid = 1;
+	ep_rx->client = rx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_rx);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+	ep_rx->client_notify = in->notify;
+	ep_rx->priv = in->priv;
+
+	if (in->is_smmu_enabled == false)
+		memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
+			sizeof(ep_rx->cfg));
+	else
+		memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
+			sizeof(ep_rx->cfg));
+
+	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	IPADBG("ipa3_ctx->ipa_wdi3_over_gsi %d\n",
+		   ipa3_ctx->ipa_wdi3_over_gsi);
+	/* setup RX gsi channel */
+	if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
+		&in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR,
+		ep_rx)) {
+		IPAERR("fail to setup wdi3 gsi rx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPAERR("failed to query gsi rx db addr\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* only 32 bit lsb is used */
+	out->rx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
+	IPADBG("out->rx_uc_db_pa %llu\n", out->rx_uc_db_pa);
+
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
+	IPADBG("client %d (ep: %d) connected\n", rx_client,
+		ipa_ep_idx_rx);
+
+	/* setup tx ep cfg */
+	ep_tx->valid = 1;
+	ep_tx->client = tx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("disable data path failed res=%d ep=%d.\n", result,
+			ipa_ep_idx_tx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (in->is_smmu_enabled == false)
+		memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
+			sizeof(ep_tx->cfg));
+	else
+		memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
+			sizeof(ep_tx->cfg));
+
+	ep_tx->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	ep_tx->cfg.aggr.aggr = IPA_GENERIC;
+	ep_tx->cfg.aggr.aggr_byte_limit = IPA_WLAN_AGGR_BYTE_LIMIT;
+	ep_tx->cfg.aggr.aggr_pkt_limit = IPA_WLAN_AGGR_PKT_LIMIT;
+	ep_tx->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
+	if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
+		IPAERR("fail to setup tx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* setup TX gsi channel */
+	if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
+		&in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR,
+		ep_tx)) {
+		IPAERR("fail to setup wdi3 gsi tx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	if (gsi_query_channel_db_addr(ep_tx->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPAERR("failed to query gsi tx db addr\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* only 32 bit lsb is used */
+	out->tx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
+	IPADBG("out->tx_uc_db_pa %llu\n", out->tx_uc_db_pa);
+	IPADBG("client %d (ep: %d) connected\n", tx_client,
+		ipa_ep_idx_tx);
+
+	/* ring initial event ring dbs */
+	gsi_query_evt_ring_db_addr(ep_rx->gsi_evt_ring_hdl,
+		&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+	IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
+		ep_rx->gsi_evt_ring_hdl, evt_ring_db_addr_low,
+		evt_ring_db_addr_high);
+
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	/*
+	 * IPA/GSI driver should ring the event DB once after
+	 * initialization of the event, with a value that is
+	 * outside of the ring range. Eg: ring base = 0x1000,
+	 * ring size = 0x100 => AP can write value > 0x1100
+	 * into the doorbell address. Eg: 0x 1110
+	 */
+	iowrite32(in->u_rx.rx.event_ring_size / 4 + 10, db_addr);
+	iounmap(db_addr);
+	gsi_query_evt_ring_db_addr(ep_tx->gsi_evt_ring_hdl,
+		&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	/*
+	 * IPA/GSI driver should ring the event DB once after
+	 * initialization of the event, with a value that is
+	 * outside of the ring range. Eg: ring base = 0x1000,
+	 * ring size = 0x100 => AP can write value > 0x1100
+	 * into the doorbell address. Eg: 0x 1110
+	 */
+	iowrite32(in->u_tx.tx.event_ring_size / 4 + 10, db_addr);
+	iounmap(db_addr);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
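+
+/*
+ * Illustrative bring-up sketch (hypothetical WLAN-driver caller, non-SMMU
+ * case, parameter setup elided; not part of this snapshot): connect
+ * allocates the GSI rings and rings the initial event doorbells, and only
+ * then may the pipes be enabled.
+ */
+#if 0
+static int example_wdi3_bringup(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out)
+{
+	int ret, tx, rx;
+
+	ret = ipa3_conn_wdi3_pipes(in, out, NULL);
+	if (ret)
+		return ret;
+
+	tx = ipa_get_ep_mapping(in->u_tx.tx.client);
+	rx = ipa_get_ep_mapping(in->u_rx.rx.client);
+	return ipa3_enable_wdi3_pipes(tx, rx);
+}
+#endif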
+
+int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	struct ipa3_ep_context *ep_tx, *ep_rx;
+	int result = 0;
+
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
+	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
+
+	if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("invalid ipa ep index\n");
+		return -EINVAL;
+	}
+
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
+	/* tear down tx pipe */
+	result = ipa3_reset_gsi_channel(ipa_ep_idx_tx);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset gsi channel: %d.\n", result);
+		goto exit;
+	}
+	result = gsi_reset_evt_ring(ep_tx->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset evt ring: %d.\n", result);
+		goto exit;
+	}
+	result = ipa3_release_gsi_channel(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("failed to release gsi channel: %d\n", result);
+		goto exit;
+	}
+
+	memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
+	IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
+
+	/* tear down rx pipe */
+	result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset gsi channel: %d.\n", result);
+		goto exit;
+	}
+	result = gsi_reset_evt_ring(ep_rx->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset evt ring: %d.\n", result);
+		goto exit;
+	}
+	result = ipa3_release_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("failed to release gsi channel: %d\n", result);
+		goto exit;
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
+	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
+	IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
+
+exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_by_pipe(ipa_ep_idx_tx));
+	return result;
+}
+
+int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	struct ipa3_ep_context *ep_tx, *ep_rx;
+	int result = 0;
+
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
+	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
+
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
+
+	/* start uC event ring */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		if (ipa3_ctx->uc_ctx.uc_loaded &&
+			!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+			if (ipa3_uc_setup_event_ring()) {
+				IPAERR("failed to set uc_event ring\n");
+				result = -EFAULT;
+				goto exit;
+			}
+		} else
+			IPAERR("uc-loaded %d, ring-valid %d\n",
+			ipa3_ctx->uc_ctx.uc_loaded,
+			ipa3_ctx->uc_ctx.uc_event_ring_valid);
+	}
+
+	/* enable data path */
+	result = ipa3_enable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d\n", result,
+			ipa_ep_idx_rx);
+		goto exit;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d\n", result,
+			ipa_ep_idx_tx);
+		goto fail_enable_path1;
+	}
+
+	/* start gsi tx channel */
+	result = gsi_start_channel(ep_tx->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to start gsi tx channel\n");
+		goto fail_enable_path2;
+	}
+
+	/* start gsi rx channel */
+	result = gsi_start_channel(ep_rx->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to start gsi rx channel\n");
+		goto fail_start_channel1;
+	}
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
+			= ep_rx->gsi_chan_hdl;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
+			= DIR_PRODUCER;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
+			= ep_tx->gsi_chan_hdl;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
+			= DIR_CONSUMER;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
+	}
+	goto exit;
+
+fail_start_channel1:
+	gsi_stop_channel(ep_tx->gsi_chan_hdl);
+fail_enable_path2:
+	ipa3_disable_data_path(ipa_ep_idx_tx);
+fail_enable_path1:
+	ipa3_disable_data_path(ipa_ep_idx_rx);
+exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
+	return result;
+}
+
+int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	u32 source_pipe_bitmask = 0;
+	bool disable_force_clear = false;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* disable tx data path */
+	result = ipa3_disable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_tx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* disable rx data path */
+	result = ipa3_disable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_rx);
+		result = -EFAULT;
+		goto fail;
+	}
+	/*
+	 * For WDI 3.0 need to ensure pipe will be empty before suspend
+	 * as IPA uC will fail to suspend the pipe otherwise.
+	 */
+	ep = &ipa3_ctx->ep[ipa_ep_idx_rx];
+	source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
+	result = ipa3_enable_force_clear(ipa_ep_idx_rx,
+			false, source_pipe_bitmask);
+	if (result) {
+		/*
+		 * assuming here modem SSR, AP can remove
+		 * the delay in this case
+		 */
+		IPAERR("failed to force clear %d\n", result);
+		IPAERR("remove delay from SCND reg\n");
+		ep_ctrl_scnd.endp_delay = false;
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx_rx,
+			&ep_ctrl_scnd);
+	} else {
+		disable_force_clear = true;
+	}
+
+	/* stop gsi rx channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("failed to stop gsi rx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* stop gsi tx channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("failed to stop gsi tx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
+			= DIR_PRODUCER;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
+			= DIR_CONSUMER;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
+	}
+	if (disable_force_clear)
+		ipa3_disable_force_clear(ipa_ep_idx_rx);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union __packed gsi_wdi3_channel_scratch2_reg scratch2_reg;
+
+	memset(&scratch2_reg, 0, sizeof(scratch2_reg));
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR_RL("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	result = gsi_read_wdi3_channel_scratch2_reg(ep->gsi_chan_hdl,
+			&scratch2_reg);
+
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to read channel scratch2 reg %d\n", result);
+		goto exit;
+	}
+
+	scratch2_reg.wdi.qmap_id = qmap_id;
+	result = gsi_write_wdi3_channel_scratch2_reg(ep->gsi_chan_hdl,
+			scratch2_reg);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write channel scratch2 reg %d\n", result);
+		goto exit;
+	}
+
+exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+/**
+ * ipa3_get_wdi3_gsi_stats() - Query WDI3 gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad NULL parms for wdi3_gsi_stats\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
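+	/*
+	 * each channel occupies one IPA3_UC_DEBUG_STATS_OFF-sized block in
+	 * the uC stats MMIO region; the field offsets below index into it
+	 */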
+	for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
+		stats->ring[i].ringFull = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		stats->ring[i].ringEmpty = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		stats->ring[i].ringUsageHigh = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		stats->ring[i].ringUsageLow = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		stats->ring[i].RingUtilCount = ioread32(
+			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+			+ i * IPA3_UC_DEBUG_STATS_OFF +
+			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}

+ 1915 - 0
ipa/ipa_v3/ipa_wigig_i.c

@@ -0,0 +1,1915 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/if_ether.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/ipa_wigig.h>
+
+#define IPA_WIGIG_DESC_RING_EL_SIZE	32
+#define IPA_WIGIG_STATUS_RING_EL_SIZE	16
+
+#define GSI_STOP_MAX_RETRY_CNT 10
+
+#define IPA_WIGIG_CONNECTED BIT(0)
+#define IPA_WIGIG_ENABLED BIT(1)
+#define IPA_WIGIG_MSB_MASK 0xFFFFFFFF00000000
+#define IPA_WIGIG_LSB_MASK 0x00000000FFFFFFFF
+#define IPA_WIGIG_MSB(num) ((u32)((num & IPA_WIGIG_MSB_MASK) >> 32))
+#define IPA_WIGIG_LSB(num) ((u32)(num & IPA_WIGIG_LSB_MASK))
+/* extract PCIE addresses [0:39] relevant msb */
+#define IPA_WIGIG_8_MSB_MASK 0xFF00000000
+#define IPA_WIGIG_8_MSB(num) ((u32)((num & IPA_WIGIG_8_MSB_MASK) >> 32))
+#define W11AD_RX 0
+#define W11AD_TX 1
+#define W11AD_TO_GSI_DB_m 1
+#define W11AD_TO_GSI_DB_n 1
+
+static LIST_HEAD(smmu_reg_addr_list);
+static LIST_HEAD(smmu_ring_addr_list);
+static DEFINE_MUTEX(smmu_lock);
+struct dentry *wigig_dent;
+
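+/*
+ * Peer register pages and rings may be shared between pipes, so their
+ * SMMU mappings are reference counted. The lists below track the mapped
+ * pages/rings per context bank and are protected by smmu_lock.
+ */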
+struct ipa_wigig_smmu_reg_addr {
+	struct list_head link;
+	phys_addr_t phys_addr;
+	enum ipa_smmu_cb_type cb_type;
+	u8 count;
+};
+
+struct ipa_wigig_smmu_ring_addr {
+	struct list_head link;
+	u64 iova;
+	enum ipa_smmu_cb_type cb_type;
+	u8 count;
+};
+
+
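+/*
+ * Invoked once the uC firmware reports ready; fires the client's
+ * deferred ready callback registered in ipa3_wigig_internal_init().
+ */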
+static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
+	unsigned long val, void *data)
+{
+	IPADBG("val %ld\n", val);
+
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(data != ipa3_ctx);
+
+	if (ipa3_ctx->uc_wigig_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb(
+			ipa3_ctx->uc_wigig_ctx.priv);
+
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_wigig_ctx.priv = NULL;
+	}
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+static struct notifier_block uc_loaded_notifier = {
+	.notifier_call = ipa3_wigig_uc_loaded_handler,
+};
+
+int ipa3_wigig_init_i(void)
+{
+	IPADBG("\n");
+
+	ipa3_uc_register_ready_cb(&uc_loaded_notifier);
+
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+int ipa3_wigig_internal_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa)
+{
+	int result = 0;
+
+	IPADBG("\n");
+
+	if (inout == NULL) {
+		IPAERR("inout is NULL");
+		return -EINVAL;
+	}
+
+	if (int_notify == NULL) {
+		IPAERR("int_notify is NULL");
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb = inout->notify;
+	} else {
+		inout->is_uC_ready = true;
+	}
+	ipa3_ctx->uc_wigig_ctx.priv = inout->priv;
+	ipa3_ctx->uc_wigig_ctx.misc_notify_cb = int_notify;
+
+	*uc_db_pa = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(
+			IPA_UC_MAILBOX_m_n,
+			W11AD_TO_GSI_DB_m,
+			W11AD_TO_GSI_DB_n);
+
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+static int ipa3_wigig_tx_bit_to_ep(
+	const u8 tx_bit_num,
+	enum ipa_client_type *type)
+{
+	IPADBG("tx_bit_num %d\n", tx_bit_num);
+
+	switch (tx_bit_num) {
+	case 2:
+		*type = IPA_CLIENT_WIGIG1_CONS;
+		break;
+	case 3:
+		*type = IPA_CLIENT_WIGIG2_CONS;
+		break;
+	case 4:
+		*type = IPA_CLIENT_WIGIG3_CONS;
+		break;
+	case 5:
+		*type = IPA_CLIENT_WIGIG4_CONS;
+		break;
+	default:
+		IPAERR("invalid tx_bit_num %d\n", tx_bit_num);
+		return -EINVAL;
+	}
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+static int ipa3_wigig_smmu_map_buffers(bool Rx,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *buff,
+	bool map)
+{
+	int result;
+
+	/* data buffers */
+	if (Rx) {
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu =
+			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
+
+		int num_elem =
+			pipe_smmu->desc_ring_size /
+			IPA_WIGIG_DESC_RING_EL_SIZE;
+
+		result = ipa3_smmu_map_peer_buff(
+			dbuff_smmu->data_buffer_base_iova,
+			dbuff_smmu->data_buffer_size * num_elem,
+			map,
+			&dbuff_smmu->data_buffer_base,
+			IPA_SMMU_CB_11AD);
+		if (result) {
+			IPAERR(
+				"failed to %s rx data_buffer %d, num elem %d\n"
+				, map ? "map" : "unmap",
+				result, num_elem);
+			goto fail_map_buff;
+		}
+
+	} else {
+		int i;
+		struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu =
+			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
+
+		for (i = 0; i < dbuff_smmu->num_buffers; i++) {
+			result = ipa3_smmu_map_peer_buff(
+				*(dbuff_smmu->data_buffer_base_iova + i),
+				dbuff_smmu->data_buffer_size,
+				map,
+				(dbuff_smmu->data_buffer_base + i),
+				IPA_SMMU_CB_11AD);
+			if (result) {
+				IPAERR(
+					"%d: failed to %s tx data buffer %d\n"
+					, i, map ? "map" : "unmap",
+					result);
+				for (i--; i >= 0; i--) {
+					result = ipa3_smmu_map_peer_buff(
+					*(dbuff_smmu->data_buffer_base_iova +
+						i),
+					dbuff_smmu->data_buffer_size,
+					!map,
+					(dbuff_smmu->data_buffer_base +
+						i),
+					IPA_SMMU_CB_11AD);
+				}
+				goto fail_map_buff;
+			}
+		}
+	}
+
+	IPADBG("exit\n");
+	return 0;
+
+fail_map_buff:
+	return result;
+}
+
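+/*
+ * Map/unmap a peer register page on the given context bank, with
+ * reference counting: a page shared by several users is mapped once and
+ * unmapped only when its count drops to zero.
+ */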
+static int ipa3_wigig_smmu_map_reg(phys_addr_t phys_addr, bool map,
+	enum ipa_smmu_cb_type cb_type)
+{
+	struct ipa_wigig_smmu_reg_addr *entry;
+	struct ipa_wigig_smmu_reg_addr *next;
+	int result = 0;
+
+	IPADBG("addr %pa, %s\n", &phys_addr, map ? "map" : "unmap");
+	mutex_lock(&smmu_lock);
+	list_for_each_entry_safe(entry, next, &smmu_reg_addr_list, link) {
+		if ((entry->phys_addr == phys_addr) &&
+			(entry->cb_type == cb_type)) {
+			IPADBG("cb %d, page %pa already mapped, ", cb_type,
+				&phys_addr);
+			if (map) {
+				entry->count++;
+				IPADBG("inc to %d\n", (entry->count));
+			} else {
+				--entry->count;
+				IPADBG("dec to %d\n", entry->count);
+				if (!(entry->count)) {
+					IPADBG("unmap and delete\n");
+					result = ipa3_smmu_map_peer_reg(
+						phys_addr, map, cb_type);
+					if (result) {
+						IPAERR("failed to unmap %pa\n",
+							&phys_addr);
+						goto finish;
+					}
+					list_del(&entry->link);
+					kfree(entry);
+				}
+			}
+			goto finish;
+		}
+	}
+	IPADBG("new page found %pa, map and add to list CB %d\n", &phys_addr,
+		cb_type);
+	result = ipa3_smmu_map_peer_reg(phys_addr, map, cb_type);
+	if (result) {
+		IPAERR("failed to map %pa\n", &phys_addr);
+		goto finish;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		IPAERR("couldn't allocate for %pa\n", &phys_addr);
+		ipa3_smmu_map_peer_reg(phys_addr, !map, cb_type);
+		result = -ENOMEM;
+		goto finish;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->phys_addr = phys_addr;
+	entry->cb_type = cb_type;
+	entry->count = 1;
+	list_add(&entry->link, &smmu_reg_addr_list);
+
+finish:
+	mutex_unlock(&smmu_lock);
+	IPADBG("exit\n");
+	return result;
+}
+
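+/* same reference-counted scheme as the register pages, keyed by IOVA */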
+static int ipa3_wigig_smmu_map_ring(u64 iova, u32 size, bool map,
+	struct sg_table *sgt, enum ipa_smmu_cb_type cb_type)
+{
+	struct ipa_wigig_smmu_ring_addr *entry;
+	struct ipa_wigig_smmu_ring_addr *next;
+	int result = 0;
+
+	IPADBG("iova %llX, %s\n", iova, map ? "map" : "unmap");
+	mutex_lock(&smmu_lock);
+	list_for_each_entry_safe(entry, next, &smmu_ring_addr_list, link) {
+		if ((entry->iova == iova) &&
+			(entry->cb_type == cb_type)) {
+			IPADBG("cb %d, page 0x%llX already mapped, ", cb_type,
+				iova);
+			if (map) {
+				entry->count++;
+				IPADBG("inc to %d\n", (entry->count));
+			} else {
+				--entry->count;
+				IPADBG("dec to %d\n", entry->count);
+				if (!(entry->count)) {
+					IPADBG("unmap and delete\n");
+					result = ipa3_smmu_map_peer_buff(
+						iova, size, map, sgt, cb_type);
+					if (result) {
+						IPAERR(
+							"failed to unmap 0x%llX\n",
+							iova);
+						goto finish;
+					}
+					list_del(&entry->link);
+					kfree(entry);
+				}
+			}
+			goto finish;
+		}
+	}
+	IPADBG("new page found 0x%llX, map and add to list\n", iova);
+	result = ipa3_smmu_map_peer_buff(iova, size, map, sgt, cb_type);
+	if (result) {
+		IPAERR("failed to map 0x%llX\n", iova);
+		goto finish;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		IPAERR("couldn't allocate for 0x%llX\n", iova);
+		ipa3_smmu_map_peer_buff(iova, size, !map, sgt, cb_type);
+		result = -ENOMEM;
+		goto finish;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->iova = iova;
+	entry->cb_type = cb_type;
+	entry->count = 1;
+	list_add(&entry->link, &smmu_ring_addr_list);
+
+finish:
+	mutex_unlock(&smmu_lock);
+	IPADBG("exit\n");
+	return result;
+}
+
+static int ipa3_wigig_smmu_map_channel(bool Rx,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *buff,
+	bool map)
+{
+	int result = 0;
+	struct ipa_smmu_cb_ctx *smmu_ctx = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
+
+	IPADBG("\n");
+
+	/*
+	 * --------------------------------------------------------------------
+	 *  entity         |HWHEAD|HWTAIL|HWHEAD|HWTAIL| misc | buffers| rings|
+	 *                 |Sring |Sring |Dring |Dring | regs |        |      |
+	 * --------------------------------------------------------------------
+	 *  GSI (apps CB)  |  TX  |RX, TX|      |RX, TX|      |        |Rx, TX|
+	 * --------------------------------------------------------------------
+	 *  IPA (11AD CB)  |      |      |      |      |      | RX, TX |      |
+	 * --------------------------------------------------------------------
+	 *  uc (uC CB)     |  RX  |      |  TX  |      |always|        |      |
+	 * --------------------------------------------------------------------
+	 *
+	 * buffers are mapped to 11AD CB. in case this context bank is shared,
+	 * mapping is done by 11ad driver only and applies to both 11ad and
+	 * IPA HWs (page tables are shared). Otherwise, mapping is done here.
+	 */
+
+	if (!smmu_ctx) {
+		IPAERR("11AD SMMU ctx is null\n");
+		return -EINVAL;
+	}
+
+	if (Rx) {
+		IPADBG("RX %s status_ring_HWHEAD_pa %pa uC CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->status_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s status_ring_HWAHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail;
+		}
+	} else {
+		IPADBG("TX %s status_ring_HWHEAD_pa %pa AP CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->status_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->status_ring_HWHEAD_pa,
+				PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_AP);
+		if (result) {
+			IPAERR(
+				"failed to %s status_ring_HWAHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail;
+		}
+
+		IPADBG("TX %s desc_ring_HWHEAD_pa %pa uC CB\n",
+			map ? "map" : "unmap",
+			&pipe_smmu->desc_ring_HWHEAD_pa);
+		result = ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->desc_ring_HWHEAD_pa,
+				PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s desc_ring_HWHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_desc_HWHEAD;
+		}
+	}
+
+	IPADBG("%s status_ring_HWTAIL_pa %pa AP CB\n",
+		map ? "map" : "unmap",
+		&pipe_smmu->status_ring_HWTAIL_pa);
+	result = ipa3_wigig_smmu_map_reg(
+		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
+		map,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR(
+			"failed to %s status_ring_HWTAIL %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_status_HWTAIL;
+	}
+
+	IPADBG("%s desc_ring_HWTAIL_pa %pa AP CB\n",
+		map ? "map" : "unmap",
+		&pipe_smmu->desc_ring_HWTAIL_pa);
+	result = ipa3_wigig_smmu_map_reg(
+		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
+		map,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s desc_ring_HWTAIL %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_desc_HWTAIL;
+	}
+
+	/* rings */
+	IPADBG("%s desc_ring_base_iova %llX AP CB\n",
+		map ? "map" : "unmap",
+		pipe_smmu->desc_ring_base_iova);
+	result = ipa3_wigig_smmu_map_ring(
+		pipe_smmu->desc_ring_base_iova,
+		pipe_smmu->desc_ring_size,
+		map,
+		&pipe_smmu->desc_ring_base,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s desc_ring_base %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_desc_ring;
+	}
+
+	IPADBG("%s status_ring_base_iova %llX AP CB\n",
+		map ? "map" : "unmap",
+		pipe_smmu->status_ring_base_iova);
+	result = ipa3_wigig_smmu_map_ring(
+		pipe_smmu->status_ring_base_iova,
+		pipe_smmu->status_ring_size,
+		map,
+		&pipe_smmu->status_ring_base,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s status_ring_base %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_status_ring;
+	}
+
+	if (!smmu_ctx->shared) {
+		IPADBG("CB not shared - map buffers\n");
+		result = ipa3_wigig_smmu_map_buffers(Rx, pipe_smmu, buff, map);
+		if (result) {
+			IPAERR("failed to %s buffers %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_buffers;
+		}
+	}
+
+	IPADBG("exit\n");
+	return 0;
+fail_buffers:
+	ipa3_wigig_smmu_map_ring(
+		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
+		!map, &pipe_smmu->status_ring_base, IPA_SMMU_CB_AP);
+fail_status_ring:
+	ipa3_wigig_smmu_map_ring(
+		pipe_smmu->desc_ring_base_iova,	pipe_smmu->desc_ring_size,
+		!map, &pipe_smmu->desc_ring_base, IPA_SMMU_CB_AP);
+fail_desc_ring:
+	ipa3_wigig_smmu_map_reg(
+		rounddown(pipe_smmu->desc_ring_HWTAIL_pa, PAGE_SIZE),
+		!map, IPA_SMMU_CB_AP);
+fail_desc_HWTAIL:
+	ipa3_wigig_smmu_map_reg(
+		rounddown(pipe_smmu->status_ring_HWTAIL_pa, PAGE_SIZE),
+		!map, IPA_SMMU_CB_AP);
+fail_status_HWTAIL:
+	if (Rx)
+		ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
+			!map, IPA_SMMU_CB_UC);
+	else
+		ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->desc_ring_HWHEAD_pa, PAGE_SIZE),
+			!map, IPA_SMMU_CB_UC);
+fail_desc_HWHEAD:
+	if (!Rx)
+		ipa3_wigig_smmu_map_reg(
+			rounddown(pipe_smmu->status_ring_HWHEAD_pa, PAGE_SIZE),
+			!map, IPA_SMMU_CB_AP);
+fail:
+	return result;
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
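+/*
+ * Interrupt moderation and HWTAIL update thresholds. The defaults below
+ * are tunable at runtime via the ipa_wigig debugfs nodes created in
+ * ipa3_wigig_init_debugfs_i().
+ */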
+static uint16_t int_modt = 15;
+static uint8_t int_modc = 200;
+static uint8_t tx_hwtail_mod_threshold = 200;
+static uint8_t rx_hwtail_mod_threshold = 200;
+
+static int ipa3_wigig_config_gsi(bool Rx,
+	bool smmu_en,
+	void *pipe_info,
+	void *buff,
+	const struct ipa_gsi_ep_config *ep_gsi,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props evt_props;
+	struct gsi_chan_props channel_props;
+	union __packed gsi_channel_scratch gsi_scratch;
+	int gsi_res;
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+	struct ipa_wigig_pipe_setup_info *pipe;
+	struct ipa_wigig_rx_pipe_data_buffer_info *rx_dbuff;
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info *tx_dbuff;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;
+
+	IPADBG("%s, %s\n", Rx ? "Rx" : "Tx", smmu_en ? "smmu en" : "smmu dis");
+
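+	/*
+	 * the 11ad descriptor ring doubles as the GSI event ring: its base
+	 * is used as the event ring base and the MSI target is the ring's
+	 * HWTAIL register, so event-ring updates land in the 11ad HW tail
+	 */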
+	/* alloc event ring */
+	memset(&evt_props, 0, sizeof(evt_props));
+	evt_props.intf = GSI_EVT_CHTYPE_11AD_EV;
+	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	evt_props.intr = GSI_INTR_MSI;
+	evt_props.intvec = 0;
+	evt_props.exclusive = true;
+	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	evt_props.user_data = NULL;
+	evt_props.int_modc = int_modc;
+	evt_props.int_modt = int_modt;
+	evt_props.ring_base_vaddr = NULL;
+
+	if (smmu_en) {
+		pipe_smmu = (struct ipa_wigig_pipe_setup_info_smmu *)pipe_info;
+		evt_props.ring_base_addr =
+			pipe_smmu->desc_ring_base_iova;
+		evt_props.ring_len = pipe_smmu->desc_ring_size;
+		evt_props.msi_addr = pipe_smmu->desc_ring_HWTAIL_pa;
+	} else {
+		pipe = (struct ipa_wigig_pipe_setup_info *)pipe_info;
+		evt_props.ring_base_addr = pipe->desc_ring_base_pa;
+		evt_props.ring_len = pipe->desc_ring_size;
+		evt_props.msi_addr = pipe->desc_ring_HWTAIL_pa;
+	}
+
+	gsi_res = gsi_alloc_evt_ring(&evt_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating event ring: %d\n", gsi_res);
+		return -EFAULT;
+	}
+
+	/* event scratch not configured by SW for TX channels */
+	if (Rx) {
+		union __packed gsi_evt_scratch evt_scratch;
+
+		memset(&evt_scratch, 0, sizeof(evt_scratch));
+		evt_scratch.w11ad.update_status_hwtail_mod_threshold =
+			rx_hwtail_mod_threshold;
+		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+			evt_scratch);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPAERR("Error writing WIGIG event ring scratch: %d\n",
+				gsi_res);
+			goto fail_write_evt_scratch;
+		}
+	}
+
+	ep->gsi_mem_info.evt_ring_len = evt_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr = evt_props.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr = evt_props.ring_base_vaddr;
+
+	/* alloc channel ring */
+	memset(&channel_props, 0, sizeof(channel_props));
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+
+	if (Rx)
+		channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	else
+		channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+
+	channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	channel_props.prot = GSI_CHAN_PROT_11AD;
+	channel_props.ch_id = ep_gsi->ipa_gsi_chan_num;
+	channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	channel_props.xfer_cb = NULL;
+
+	channel_props.db_in_bytes = 0;
+	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
+	channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
+	channel_props.low_weight = 1;
+	channel_props.err_cb = ipa_gsi_chan_err_cb;
+
+	channel_props.ring_base_vaddr = NULL;
+
+	if (Rx) {
+		if (smmu_en) {
+			rx_dbuff_smmu =
+			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
+
+			channel_props.ring_base_addr =
+				pipe_smmu->status_ring_base_iova;
+			channel_props.ring_len =
+				pipe_smmu->status_ring_size;
+
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+
+			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
+				IPA_WIGIG_LSB(
+					rx_dbuff_smmu->data_buffer_base_iova);
+			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
+				IPA_WIGIG_MSB(
+					rx_dbuff_smmu->data_buffer_base_iova);
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(rx_dbuff_smmu->data_buffer_size);
+		} else {
+			rx_dbuff =
+			(struct ipa_wigig_rx_pipe_data_buffer_info *)buff;
+
+			channel_props.ring_base_addr =
+				pipe->status_ring_base_pa;
+			channel_props.ring_len = pipe->status_ring_size;
+
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(pipe->status_ring_HWTAIL_pa);
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(pipe->status_ring_HWTAIL_pa);
+
+			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
+				IPA_WIGIG_LSB(rx_dbuff->data_buffer_base_pa);
+			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
+				IPA_WIGIG_MSB(rx_dbuff->data_buffer_base_pa);
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(rx_dbuff->data_buffer_size);
+		}
+		IPADBG("rx scratch: status_ring_hwtail_address_lsb 0x%X\n",
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb);
+		IPADBG("rx scratch: status_ring_hwtail_address_msb 0x%X\n",
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb);
+		IPADBG("rx scratch: data_buffers_base_address_lsb 0x%X\n",
+			gsi_scratch.rx_11ad.data_buffers_base_address_lsb);
+		IPADBG("rx scratch: data_buffers_base_address_msb 0x%X\n",
+			gsi_scratch.rx_11ad.data_buffers_base_address_msb);
+		IPADBG("rx scratch: fixed_data_buffer_size_pow_2 %d\n",
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2);
+		IPADBG("rx scratch 0x[%X][%X][%X][%X]\n",
+			gsi_scratch.data.word1,
+			gsi_scratch.data.word2,
+			gsi_scratch.data.word3,
+			gsi_scratch.data.word4);
+	} else {
+		if (smmu_en) {
+			tx_dbuff_smmu =
+			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
+			channel_props.ring_base_addr =
+				pipe_smmu->desc_ring_base_iova;
+			channel_props.ring_len =
+				pipe_smmu->desc_ring_size;
+
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe_smmu->status_ring_HWHEAD_pa);
+			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
+				IPA_WIGIG_8_MSB(
+					pipe_smmu->status_ring_HWHEAD_pa);
+
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(tx_dbuff_smmu->data_buffer_size);
+
+			gsi_scratch.tx_11ad.status_ring_num_elem =
+				pipe_smmu->status_ring_size /
+				IPA_WIGIG_STATUS_RING_EL_SIZE;
+		} else {
+			tx_dbuff =
+			(struct ipa_wigig_tx_pipe_data_buffer_info *)buff;
+
+			channel_props.ring_base_addr = pipe->desc_ring_base_pa;
+			channel_props.ring_len = pipe->desc_ring_size;
+
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe->status_ring_HWTAIL_pa);
+			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe->status_ring_HWHEAD_pa);
+			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb =
+				IPA_WIGIG_8_MSB(pipe->status_ring_HWHEAD_pa);
+
+			gsi_scratch.tx_11ad.status_ring_num_elem =
+				pipe->status_ring_size /
+				IPA_WIGIG_STATUS_RING_EL_SIZE;
+
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(tx_dbuff->data_buffer_size);
+		}
+		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold =
+			tx_hwtail_mod_threshold;
+		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
+		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
+			gsi_scratch.tx_11ad.status_ring_hwhead_address_lsb);
+		IPADBG("tx scratch: status_ring_hwhead_hwtail_8_msb 0x%X\n",
+			gsi_scratch.tx_11ad.status_ring_hwhead_hwtail_8_msb);
+		IPADBG("tx scratch:status_ring_num_elem %d\n",
+			gsi_scratch.tx_11ad.status_ring_num_elem);
+		IPADBG("tx scratch:fixed_data_buffer_size_pow_2 %d\n",
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2);
+		IPADBG("tx scratch 0x[%X][%X][%X][%X]\n",
+			gsi_scratch.data.word1,
+			gsi_scratch.data.word2,
+			gsi_scratch.data.word3,
+			gsi_scratch.data.word4);
+	}
+
+	IPADBG("ch_id: %d\n", channel_props.ch_id);
+	IPADBG("evt_ring_hdl: %ld\n", channel_props.evt_ring_hdl);
+	IPADBG("re_size: %d\n", channel_props.re_size);
+	IPADBG("GSI channel ring len: %d\n", channel_props.ring_len);
+	IPADBG("channel ring  base addr = 0x%llX\n",
+		(unsigned long long)channel_props.ring_base_addr);
+
+	IPADBG("Allocating GSI channel\n");
+	gsi_res = gsi_alloc_channel(&channel_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_alloc_channel failed %d\n", gsi_res);
+		goto fail_alloc_channel;
+	}
+
+	IPADBG("Writing Channel scratch\n");
+	ep->gsi_mem_info.chan_ring_len = channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr = channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		channel_props.ring_base_vaddr;
+
+	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		gsi_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_write_channel_scratch failed %d\n",
+			gsi_res);
+		goto fail_write_channel_scratch;
+	}
+
+	IPADBG("exit\n");
+
+	return 0;
+fail_write_channel_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_channel:
+fail_write_evt_scratch:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	return -EFAULT;
+}
+
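+/*
+ * Send a channel setup/teardown command to the uC. Setup passes the GSI
+ * channel, the WIFI channel and the peer HWHEAD register address;
+ * teardown only needs the GSI channel number.
+ */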
+static int ipa3_wigig_config_uc(bool init,
+	bool Rx,
+	u8 wifi_ch,
+	u8 gsi_ch,
+	phys_addr_t HWHEAD)
+{
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	int result;
+
+	IPADBG("%s\n", init ? "init" : "Deinit");
+	if (init) {
+		struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+
+		cmd_data =
+			(struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
+
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.dir =
+			Rx ? W11AD_RX : W11AD_TX;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.gsi_ch = gsi_ch;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_ch = wifi_ch;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_msb =
+			IPA_WIGIG_MSB(HWHEAD);
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_lsb =
+			IPA_WIGIG_LSB(HWHEAD);
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
+
+	} else {
+		struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+
+		cmd_data =
+			(struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
+
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->CommonCh_params.W11AdCommonCh_params.gsi_ch = gsi_ch;
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to %s uc for %s gsi channel %d\n",
+			init ? "init" : "deinit",
+			Rx ? "Rx" : "Tx", gsi_ch);
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return result;
+}
+
+int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
+	struct dentry **parent)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg ep_cfg;
+	enum ipa_client_type rx_client = IPA_CLIENT_WIGIG_PROD;
+	bool is_smmu_enabled;
+	struct ipa_wigig_conn_rx_in_params_smmu *input_smmu = NULL;
+	struct ipa_wigig_conn_rx_in_params *input = NULL;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	void *pipe_info;
+	void *buff;
+	phys_addr_t status_ring_HWHEAD_pa;
+	int result;
+
+	IPADBG("\n");
+
+	*parent = wigig_dent;
+
+	ipa_ep_idx = ipa_get_ep_mapping(rx_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (IPA_CLIENT_WIGIG_PROD) %d.\n",
+			ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(rx_client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			rx_client);
+		return -EPERM;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup rx ep cfg */
+	ep->valid = 1;
+	ep->client = rx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
+	if (is_smmu_enabled) {
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu;
+
+		input_smmu = (struct ipa_wigig_conn_rx_in_params_smmu *)in;
+		dbuff_smmu = &input_smmu->dbuff_smmu;
+		ep->client_notify = input_smmu->notify;
+		ep->priv = input_smmu->priv;
+
+		IPADBG(
+		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
+		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
+		input_smmu->pipe_smmu.desc_ring_size,
+		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
+		input_smmu->pipe_smmu.status_ring_size);
+		IPADBG("data_buffer_base_iova 0x%llX data_buffer_size %d",
+			(unsigned long long)dbuff_smmu->data_buffer_base_iova,
+			input_smmu->dbuff_smmu.data_buffer_size);
+
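+		/*
+		 * only 8 MSBs of the buffer base fit in the rx channel
+		 * scratch, so the address is limited to 40 bits
+		 */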
+		if (IPA_WIGIG_MSB(
+			dbuff_smmu->data_buffer_base_iova) &
+			0xFFFFFF00) {
+			IPAERR(
+			"data_buffers_base_address_msb is over the 8 bit limit (0x%llX)\n",
+			(unsigned long long)dbuff_smmu->data_buffer_base_iova);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+		if (dbuff_smmu->data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (%d)\n"
+				, dbuff_smmu->data_buffer_size);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+	} else {
+		input = (struct ipa_wigig_conn_rx_in_params *)in;
+		ep->client_notify = input->notify;
+		ep->priv = input->priv;
+
+		IPADBG(
+			"desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
+			&input->pipe.desc_ring_base_pa,
+			input->pipe.desc_ring_size,
+			&input->pipe.status_ring_base_pa,
+			input->pipe.status_ring_size);
+		IPADBG("data_buffer_base_pa %pa data_buffer_size %d",
+			&input->dbuff.data_buffer_base_pa,
+			input->dbuff.data_buffer_size);
+
+		if (
+		IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
+			IPAERR(
+				"data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n"
+				, &input->dbuff.data_buffer_base_pa);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+		if (input->dbuff.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input->dbuff.data_buffer_size);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+	}
+
+	memset(&ep_cfg, 0, sizeof(ep_cfg));
+	ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
+	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
+	ep_cfg.hdr.hdr_additional_const_len = 0;
+	ep_cfg.hdr_ext.hdr_little_endian = true;
+	ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
+	ep_cfg.hdr.hdr_metadata_reg_valid = 1;
+	ep_cfg.mode.mode = IPA_BASIC;
+
+
+	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (is_smmu_enabled) {
+		result = ipa3_wigig_smmu_map_channel(true,
+			&input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu,
+			true);
+		if (result) {
+			IPAERR("failed to setup rx pipe smmu map\n");
+			result = -EFAULT;
+			goto fail;
+		}
+
+		pipe_info = &input_smmu->pipe_smmu;
+		buff = &input_smmu->dbuff_smmu;
+		status_ring_HWHEAD_pa =
+			input_smmu->pipe_smmu.status_ring_HWHEAD_pa;
+	} else {
+		pipe_info = &input->pipe;
+		buff = &input->dbuff;
+		status_ring_HWHEAD_pa =
+			input->pipe.status_ring_HWHEAD_pa;
+	}
+
+	result = ipa3_wigig_config_gsi(true,
+		is_smmu_enabled,
+		pipe_info,
+		buff,
+		ep_gsi, ep);
+	if (result)
+		goto fail_gsi;
+
+	result = ipa3_wigig_config_uc(
+		true, true, 0,
+		ep_gsi->ipa_gsi_chan_num,
+		status_ring_HWHEAD_pa);
+	if (result)
+		goto fail_uc_config;
+
+	ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	out->client = IPA_CLIENT_WIGIG_PROD;
+	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("wigig rx pipe connected successfully\n");
+	IPADBG("exit\n");
+
+	return 0;
+
+fail_uc_config:
+	/* Release channel and evt */
+	ipa3_release_gsi_channel(ipa_ep_idx);
+fail_gsi:
+	if (input_smmu)
+		ipa3_wigig_smmu_map_channel(true, &input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu, false);
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+int ipa3_conn_wigig_client_i(void *in,
+	struct ipa_wigig_conn_out_params *out,
+	ipa_notify_cb tx_notify,
+	void *priv)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg ep_cfg;
+	enum ipa_client_type tx_client;
+	bool is_smmu_enabled;
+	struct ipa_wigig_conn_tx_in_params_smmu *input_smmu = NULL;
+	struct ipa_wigig_conn_tx_in_params *input = NULL;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	u32 aggr_byte_limit;
+	int result;
+	void *pipe_info;
+	void *buff;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	u8 wifi_ch;
+
+	IPADBG("\n");
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
+	if (is_smmu_enabled) {
+		input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;
+
+		IPADBG(
+		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
+		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
+		input_smmu->pipe_smmu.desc_ring_size,
+		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
+		input_smmu->pipe_smmu.status_ring_size);
+		IPADBG("num buffers %d, data buffer size %d\n",
+			input_smmu->dbuff_smmu.num_buffers,
+			input_smmu->dbuff_smmu.data_buffer_size);
+
+		if (ipa3_wigig_tx_bit_to_ep(input_smmu->int_gen_tx_bit_num,
+			&tx_client)) {
+			return -EINVAL;
+		}
+		if (input_smmu->dbuff_smmu.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input_smmu->dbuff_smmu.data_buffer_size);
+			return -EFAULT;
+		}
+
+		if (IPA_WIGIG_8_MSB(
+			input_smmu->pipe_smmu.status_ring_HWHEAD_pa)
+			!= IPA_WIGIG_8_MSB(
+				input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
+			IPAERR(
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
+			, input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
+			input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
+			return -EFAULT;
+		}
+
+		wifi_ch = input_smmu->int_gen_tx_bit_num;
+
+		/* convert to kBytes */
+		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
+			input_smmu->dbuff_smmu.data_buffer_size);
+	} else {
+		input = (struct ipa_wigig_conn_tx_in_params *)in;
+
+		IPADBG(
+			"desc_ring_base_pa %pa desc_ring_size %d status_ring_base_pa %pa status_ring_size %d",
+			&input->pipe.desc_ring_base_pa,
+			input->pipe.desc_ring_size,
+			&input->pipe.status_ring_base_pa,
+			input->pipe.status_ring_size);
+		IPADBG("data_buffer_size %d", input->dbuff.data_buffer_size);
+
+		if (ipa3_wigig_tx_bit_to_ep(input->int_gen_tx_bit_num,
+			&tx_client)) {
+			return -EINVAL;
+		}
+
+		if (input->dbuff.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input->dbuff.data_buffer_size);
+			return -EFAULT;
+		}
+
+		if (IPA_WIGIG_8_MSB(
+			input->pipe.status_ring_HWHEAD_pa)
+			!= IPA_WIGIG_8_MSB(
+				input->pipe.status_ring_HWTAIL_pa)) {
+			IPAERR(
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
+				, input->pipe.status_ring_HWHEAD_pa,
+				input->pipe.status_ring_HWTAIL_pa);
+			return -EFAULT;
+		}
+
+		wifi_ch = input->int_gen_tx_bit_num;
+
+		/* convert to kBytes */
+		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
+			input->dbuff.data_buffer_size);
+	}
+	IPADBG("client type is %d\n", tx_client);
+
+	ipa_ep_idx = ipa_get_ep_mapping(tx_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			tx_client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(tx_client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			tx_client);
+		return -EFAULT;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup tx ep cfg */
+	ep->valid = 1;
+	ep->client = tx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto fail;
+	}
+
+	ep->client_notify = tx_notify;
+	ep->priv = priv;
+
+	memset(&ep_cfg, 0, sizeof(ep_cfg));
+	ep_cfg.nat.nat_en = IPA_DST_NAT;
+	ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
+	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
+	ep_cfg.hdr.hdr_additional_const_len = 0;
+	ep_cfg.hdr_ext.hdr_little_endian = true;
+	ep_cfg.mode.mode = IPA_BASIC;
+
+	/* config hard byte limit, max is the buffer size (in kB)*/
+	ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	ep_cfg.aggr.aggr = IPA_GENERIC;
+	ep_cfg.aggr.aggr_pkt_limit = 1;
+	ep_cfg.aggr.aggr_byte_limit = aggr_byte_limit;
+	ep_cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
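+	/*
+	 * aggr_pkt_limit of 1 closes an aggregation frame per packet; the
+	 * hard byte limit (buffer size in KB) keeps each frame within a
+	 * single 11ad data buffer
+	 */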
+
+	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (is_smmu_enabled) {
+		result = ipa3_wigig_smmu_map_channel(false,
+			&input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu,
+			true);
+		if (result) {
+			IPAERR(
+				"failed to setup tx pipe smmu map client %d (ep %d)\n"
+			, tx_client, ipa_ep_idx);
+			result = -EFAULT;
+			goto fail;
+		}
+
+		pipe_info = &input_smmu->pipe_smmu;
+		buff = &input_smmu->dbuff_smmu;
+		desc_ring_HWHEAD_pa =
+			input_smmu->pipe_smmu.desc_ring_HWHEAD_pa;
+	} else {
+		pipe_info = &input->pipe;
+		buff = &input->dbuff;
+		desc_ring_HWHEAD_pa =
+			input->pipe.desc_ring_HWHEAD_pa;
+	}
+
+	result = ipa3_wigig_config_gsi(false,
+		is_smmu_enabled,
+		pipe_info,
+		buff,
+		ep_gsi, ep);
+	if (result)
+		goto fail_gsi;
+
+	result = ipa3_wigig_config_uc(
+		true, false, wifi_ch,
+		ep_gsi->ipa_gsi_chan_num,
+		desc_ring_HWHEAD_pa);
+	if (result)
+		goto fail_uc_config;
+
+	out->client = tx_client;
+	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("wigig client %d (ep %d) connected successfully\n", tx_client,
+		ipa_ep_idx);
+	return 0;
+
+fail_uc_config:
+	/* Release channel and evt */
+	ipa3_release_gsi_channel(ipa_ep_idx);
+fail_gsi:
+	if (input_smmu)
+		ipa3_wigig_smmu_map_channel(false, &input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu, false);
+fail:
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff)
+{
+	bool is_smmu_enabled;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	int result;
+	bool rx = false;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			client);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
+		IPAERR("client in bad state(client %d) 0x%X\n",
+			client, ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	if (client == IPA_CLIENT_WIGIG_PROD)
+		rx = true;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* Release channel and evt */
+	result = ipa3_release_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPAERR("failed to deallocate channel\n");
+		goto fail;
+	}
+
+	/* only gsi ch number and dir are necessary */
+	result = ipa3_wigig_config_uc(
+		false, rx, 0,
+		ep_gsi->ipa_gsi_chan_num, 0);
+	if (result) {
+		IPAERR("failed uC channel teardown %d\n", result);
+		WARN_ON(1);
+	}
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_11AD];
+	if (is_smmu_enabled) {
+		if (!pipe_smmu || !dbuff) {
+			IPAERR("smmu input is null %pK %pK\n",
+				pipe_smmu, dbuff);
+			WARN_ON(1);
+		} else {
+			result = ipa3_wigig_smmu_map_channel(rx,
+				pipe_smmu,
+				dbuff,
+				false);
+			if (result) {
+				IPAERR(
+					"failed to unmap pipe smmu %d (ep %d)\n"
+					, client, ipa_ep_idx);
+				result = -EFAULT;
+				goto fail;
+			}
+		}
+
+		if (rx) {
+			if (!list_empty(&smmu_reg_addr_list)) {
+				IPAERR("smmu_reg_addr_list not empty\n");
+				WARN_ON(1);
+			}
+
+			if (!list_empty(&smmu_ring_addr_list)) {
+				IPAERR("smmu_ring_addr_list not empty\n");
+				WARN_ON(1);
+			}
+		}
+	} else if (pipe_smmu || dbuff) {
+		IPAERR("smmu input is not null %pK %pK\n",
+			pipe_smmu, dbuff);
+		WARN_ON(1);
+	}
+
+	memset(ep, 0, sizeof(struct ipa3_ep_context));
+
+	ep->gsi_offload_state = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("client (ep: %d) disconnected\n", ipa_ep_idx);
+
+	IPADBG("exit\n");
+	return 0;
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+int ipa3_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	bool map = false;
+
+	IPADBG("params: %s, %pa, %pa, %pa, %pa, %pa\n",
+		init ? "init" : "deInit",
+		&periph_baddr_pa,
+		&pseudo_cause_pa,
+		&int_gen_tx_pa,
+		&int_gen_rx_pa,
+		&dma_ep_misc_pa);
+
+	/* first make sure registers are SMMU mapped if necessary */
+	if ((!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC])) {
+		if (init)
+			map = true;
+
+		IPADBG("SMMU enabled, map %d\n", map);
+
+		result = ipa3_smmu_map_peer_reg(
+			rounddown(pseudo_cause_pa, PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s pseudo_cause reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			rounddown(int_gen_tx_pa, PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s int_gen_tx reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_gen_tx;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			rounddown(int_gen_rx_pa, PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s int_gen_rx reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_gen_rx;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			rounddown(dma_ep_misc_pa, PAGE_SIZE),
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s dma_ep_misc reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_dma_ep_misc;
+		}
+	}
+
+	/* now send the WIGIG HW base address to the uC */
+	if (init) {
+		struct IpaHwPeripheralInitCmdData_t *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			result = -ENOMEM;
+			if (map)
+				goto fail_alloc;
+			return result;
+		}
+		cmd_data = (struct IpaHwPeripheralInitCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->Init_params.W11AdInit_params.periph_baddr_msb =
+			IPA_WIGIG_MSB(periph_baddr_pa);
+		cmd_data->Init_params.W11AdInit_params.periph_baddr_lsb =
+			IPA_WIGIG_LSB(periph_baddr_pa);
+		command = IPA_CPU_2_HW_CMD_PERIPHERAL_INIT;
+	} else {
+		struct IpaHwPeripheralDeinitCmdData_t *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			result = -ENOMEM;
+			if (map)
+				goto fail_alloc;
+			return result;
+		}
+		cmd_data = (struct IpaHwPeripheralDeinitCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		command = IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to %s uc MSI config\n", init ? "init" : "deinit");
+		goto fail_command;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+
+	return 0;
+fail_command:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	/* no SMMU rollback is needed if the registers were never mapped */
+	if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC])
+		return result;
+fail_alloc:
+	ipa3_smmu_map_peer_reg(
+		rounddown(dma_ep_misc_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
+fail_dma_ep_misc:
+	ipa3_smmu_map_peer_reg(
+		rounddown(int_gen_rx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
+fail_gen_rx:
+	ipa3_smmu_map_peer_reg(
+		rounddown(int_gen_tx_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
+fail_gen_tx:
+	ipa3_smmu_map_peer_reg(
+		rounddown(pseudo_cause_pa, PAGE_SIZE), !map, IPA_SMMU_CB_UC);
+fail:
+	return result;
+}
+
+int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ipa_ep_idx, res;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int retry_cnt = 0;
+	uint64_t val;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(client);
+
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res)
+		goto fail_enable_datapath;
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+
+	/* ring the event db (outside the ring boundary) */
+	val = ep->gsi_mem_info.evt_ring_base_addr +
+		ep->gsi_mem_info.evt_ring_len;
+	res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, val);
+	if (res) {
+		IPAERR(
+			"fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n"
+			, res, ep->gsi_evt_ring_hdl,
+			(unsigned long long)val);
+		res = -EFAULT;
+		goto fail_ring_evt;
+	}
+
+	IPADBG("start channel\n");
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed %d\n", res);
+		WARN_ON(1);
+		res = -EFAULT;
+		goto fail_gsi_start;
+	}
+
+	/* for TX we have to ring the channel db (last desc in the ring) */
+	if (client != IPA_CLIENT_WIGIG_PROD) {
+		uint64_t val;
+
+		val  = ep->gsi_mem_info.chan_ring_base_addr +
+			ep->gsi_mem_info.chan_ring_len -
+			IPA_WIGIG_DESC_RING_EL_SIZE;
+
+		IPADBG("ring ch doorbell (0x%llX) TX %ld\n", val,
+			ep->gsi_chan_hdl);
+		res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
+		if (res) {
+			IPAERR(
+				"fail to ring channel db %d. hdl=%lu wp=0x%llx\n"
+				, res, ep->gsi_chan_hdl,
+				(unsigned long long)val);
+			res = -EFAULT;
+			goto fail_ring_ch;
+		}
+	}
+
+	ep->gsi_offload_state |= IPA_WIGIG_ENABLED;
+
+	IPADBG("exit\n");
+
+	return 0;
+
+fail_ring_ch:
+	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("failed to stop channel res = %d\n", res);
+	} else if (res == -GSI_STATUS_AGAIN) {
+		IPADBG("GSI stop channel failed retry cnt = %d\n",
+			retry_cnt);
+		retry_cnt++;
+		if (retry_cnt < GSI_STOP_MAX_RETRY_CNT)
+			goto fail_ring_ch;
+	} else {
+		IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
+	}
+	res = -EFAULT;
+fail_gsi_start:
+fail_ring_evt:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_enable_datapath:
+	IPA_ACTIVE_CLIENTS_DEC_EP(client);
+	return res;
+}
+
+int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ipa_ep_idx, res;
+	struct ipa3_ep_context *ep;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	bool disable_force_clear = false;
+	u32 source_pipe_bitmask = 0;
+	int retry_cnt = 0;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state !=
+		(IPA_WIGIG_CONNECTED | IPA_WIGIG_ENABLED)) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	IPADBG("pipe %d\n", ipa_ep_idx);
+	source_pipe_bitmask = 1 << ipa_ep_idx;
+	res = ipa3_enable_force_clear(ipa_ep_idx,
+		false, source_pipe_bitmask);
+	if (res) {
+		/*
+		 * assuming here modem SSR, AP can remove
+		 * the delay in this case
+		 */
+		IPAERR("failed to force clear %d\n", res);
+		IPAERR("remove delay from SCND reg\n");
+		ep_ctrl_scnd.endp_delay = false;
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx,
+			&ep_ctrl_scnd);
+	} else {
+		disable_force_clear = true;
+	}
+retry_gsi_stop:
+	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("failed to stop channel res = %d\n", res);
+		goto fail_stop_channel;
+	} else if (res == -GSI_STATUS_AGAIN) {
+		IPADBG("GSI stop channel failed retry cnt = %d\n",
+			retry_cnt);
+		retry_cnt++;
+		if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
+			goto fail_stop_channel;
+		goto retry_gsi_stop;
+	} else {
+		IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
+	}
+
+	res = ipa3_reset_gsi_channel(ipa_ep_idx);
+	if (res != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to reset chan: %d.\n", res);
+		goto fail_stop_channel;
+	}
+
+	if (disable_force_clear)
+		ipa3_disable_force_clear(ipa_ep_idx);
+
+	res = ipa3_disable_data_path(ipa_ep_idx);
+	if (res) {
+		WARN_ON(1);
+		return res;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx));
+	IPADBG("exit\n");
+	return 0;
+
+fail_stop_channel:
+	ipa_assert();
+	return res;
+}
+
+#ifndef CONFIG_DEBUG_FS
+int ipa3_wigig_init_debugfs_i(struct dentry *parent) { return 0; }
+#else
+int ipa3_wigig_init_debugfs_i(struct dentry *parent)
+{
+	const mode_t read_write_mode = 0664;
+	struct dentry *file = NULL;
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("ipa_wigig", parent);
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("fail to create folder in debug_fs\n");
+		return -EFAULT;
+	}
+
+	wigig_dent = dent;
+
+	file = debugfs_create_u8("modc", read_write_mode, dent,
+		&int_modc);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file modc\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u16("modt", read_write_mode, dent,
+		&int_modt);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file modt\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u8("rx_mod_th", read_write_mode, dent,
+		&rx_hwtail_mod_threshold);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file rx_mod_th\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u8("tx_mod_th", read_write_mode, dent,
+		&tx_hwtail_mod_threshold);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file tx_mod_th\n");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	debugfs_remove_recursive(dent);
+	wigig_dent = NULL;
+	return -EFAULT;
+}
+#endif

+ 1787 - 0
ipa/ipa_v3/ipahal/ipahal.c

@@ -0,0 +1,1787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_hw_stats_i.h"
+#include "ipahal_nat_i.h"
+
+struct ipahal_context *ipahal_ctx;
+
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+	__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+	__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+	__stringify(IPA_IMM_CMD_REGISTER_WRITE),
+	__stringify(IPA_IMM_CMD_NAT_DMA),
+	__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+	__stringify(IPA_IMM_CMD_TABLE_DMA),
+	__stringify(IPA_IMM_CMD_IP_V6_CT_INIT)
+};
+
+static const char *ipahal_pkt_status_exception_to_str
+	[IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT),
+};
+
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
+	struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
+		(struct ipahal_imm_cmd_dma_task_32b_addr *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld))
+		return pyld;
+
+	/* Currently supports only one packet */
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
+
+	if (unlikely(dma_params->size1 & ~0xFFFF)) {
+		WARN(1, "Size1 is bigger than 16bit width 0x%x\n",
+			dma_params->size1);
+	}
+	if (unlikely(dma_params->packet_size & ~0xFFFF)) {
+		WARN(1, "Pkt size is bigger than 16bit width 0x%x\n",
+			dma_params->packet_size);
+	}
+	data->cmplt = dma_params->cmplt ? 1 : 0;
+	data->eof = dma_params->eof ? 1 : 0;
+	data->flsh = dma_params->flsh ? 1 : 0;
+	data->lock = dma_params->lock ? 1 : 0;
+	data->unlock = dma_params->unlock ? 1 : 0;
+	data->size1 = dma_params->size1;
+	data->addr1 = dma_params->addr1;
+	data->packet_size = dma_params->packet_size;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
+	struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
+		(struct ipahal_imm_cmd_ip_packet_tag_status *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
+
+	if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
+		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+			tag_params->tag);
+		WARN_ON(1);
+	}
+	data->tag = tag_params->tag;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_shared_mem *data;
+	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+		(struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld))
+		return pyld;
+
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
+
+	if (unlikely(mem_params->size & ~0xFFFF)) {
+		WARN(1, "Size is bigger than 16bit width 0x%x\n",
+			mem_params->size);
+	}
+	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+		WARN(1, "Local addr is bigger than 16bit width 0x%x\n",
+			mem_params->local_addr);
+	}
+	data->direction = mem_params->is_read ? 1 : 0;
+	data->size = mem_params->size;
+	data->local_addr = mem_params->local_addr;
+	data->system_addr = mem_params->system_addr;
+	data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
+	switch (mem_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			mem_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data;
+	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+		(struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+	if (unlikely(mem_params->size & ~0xFFFF)) {
+		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+			mem_params->size);
+		WARN_ON(1);
+		return NULL;
+	}
+	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+			mem_params->local_addr);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		WARN_ON(1);
+		return pyld;
+	}
+
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data;
+
+	data->direction = mem_params->is_read ? 1 : 0;
+	data->clear_after_read = mem_params->clear_after_read;
+	data->size = mem_params->size;
+	data->local_addr = mem_params->local_addr;
+	data->system_addr = mem_params->system_addr;
+	pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8;
+	switch (mem_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		pyld->opcode |= (1 << 9);
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		pyld->opcode |= (2 << 9);
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			mem_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_register_write *data;
+	struct ipahal_imm_cmd_register_write *regwrt_params =
+		(struct ipahal_imm_cmd_register_write *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
+
+	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			regwrt_params->offset);
+		WARN_ON(1);
+	}
+	data->offset = regwrt_params->offset;
+	data->value = regwrt_params->value;
+	data->value_mask = regwrt_params->value_mask;
+
+	data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
+	switch (regwrt_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_register_write_v_4_0 *data;
+	struct ipahal_imm_cmd_register_write *regwrt_params =
+		(struct ipahal_imm_cmd_register_write *)params;
+
+	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			regwrt_params->offset);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		WARN_ON(1);
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data;
+
+	data->offset = regwrt_params->offset;
+	data->offset_high = regwrt_params->offset >> 16;
+	data->value = regwrt_params->value;
+	data->value_mask = regwrt_params->value_mask;
+
+	pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 1 : 0) << 8;
+	switch (regwrt_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		pyld->opcode |= (1 << 9);
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		pyld->opcode |= (2 << 9);
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
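+/*
+ * Editorial note (for illustration): starting with IPAv4.0 the pipeline
+ * clear parameters are no longer carried in the command payload; the two
+ * v_4_0 constructors above encode them into the opcode itself - bit 8
+ * holds skip_pipeline_clear and bits 10:9 hold the clear option
+ * (0 = HPS, 1 = source group, 2 = full pipeline).
+ */
+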
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_init *data;
+	struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
+		(struct ipahal_imm_cmd_ip_packet_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
+
+	if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
+		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+			pktinit_params->destination_pipe_index);
+		WARN_ON(1);
+	}
+	data->destination_pipe_index = pktinit_params->destination_pipe_index;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_nat_dma *data;
+	struct ipahal_imm_cmd_table_dma *nat_params =
+		(struct ipahal_imm_cmd_table_dma *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
+
+	data->table_index = nat_params->table_index;
+	data->base_addr = nat_params->base_addr;
+	data->offset = nat_params->offset;
+	data->data = nat_params->data;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_table_dma_ipav4(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_table_dma_ipav4 *data;
+	struct ipahal_imm_cmd_table_dma *nat_params =
+		(struct ipahal_imm_cmd_table_dma *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_table_dma_ipav4 *)pyld->data;
+
+	data->table_index = nat_params->table_index;
+	data->base_addr = nat_params->base_addr;
+	data->offset = nat_params->offset;
+	data->data = nat_params->data;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_system *data;
+	struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
+		(struct ipahal_imm_cmd_hdr_init_system *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
+
+	data->hdr_table_addr = syshdr_params->hdr_table_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_local *data;
+	struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
+		(struct ipahal_imm_cmd_hdr_init_local *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
+
+	if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
+		IPAHAL_ERR("Hdr table size is bigger than 12bit width 0x%x\n",
+			lclhdr_params->size_hdr_table);
+		WARN_ON(1);
+	}
+	data->hdr_table_addr = lclhdr_params->hdr_table_addr;
+	data->size_hdr_table = lclhdr_params->size_hdr_table;
+	data->hdr_addr = lclhdr_params->hdr_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
+	struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
+		(struct ipahal_imm_cmd_ip_v6_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt6_params->hash_rules_addr;
+	data->hash_rules_size = rt6_params->hash_rules_size;
+	data->hash_local_addr = rt6_params->hash_local_addr;
+	data->nhash_rules_addr = rt6_params->nhash_rules_addr;
+	data->nhash_rules_size = rt6_params->nhash_rules_size;
+	data->nhash_local_addr = rt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
+	struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
+		(struct ipahal_imm_cmd_ip_v4_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt4_params->hash_rules_addr;
+	data->hash_rules_size = rt4_params->hash_rules_size;
+	data->hash_local_addr = rt4_params->hash_local_addr;
+	data->nhash_rules_addr = rt4_params->nhash_rules_addr;
+	data->nhash_rules_size = rt4_params->nhash_rules_size;
+	data->nhash_local_addr = rt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
+	struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
+		(struct ipahal_imm_cmd_ip_v4_nat_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
+
+	data->ipv4_rules_addr = nat4_params->table_init.base_table_addr;
+	data->ipv4_expansion_rules_addr =
+		nat4_params->table_init.expansion_table_addr;
+	data->index_table_addr = nat4_params->index_table_addr;
+	data->index_table_expansion_addr =
+		nat4_params->index_table_expansion_addr;
+	data->table_index = nat4_params->table_init.table_index;
+	data->ipv4_rules_addr_type =
+		nat4_params->table_init.base_table_addr_shared ? 1 : 0;
+	data->ipv4_expansion_rules_addr_type =
+		nat4_params->table_init.expansion_table_addr_shared ? 1 : 0;
+	data->index_table_addr_type =
+		nat4_params->index_table_addr_shared ? 1 : 0;
+	data->index_table_expansion_addr_type =
+		nat4_params->index_table_expansion_addr_shared ? 1 : 0;
+	data->size_base_tables = nat4_params->table_init.size_base_table;
+	data->size_expansion_tables =
+		nat4_params->table_init.size_expansion_table;
+	data->public_addr_info = nat4_params->public_addr_info;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_ct_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_ct_init *data;
+	struct ipahal_imm_cmd_ip_v6_ct_init *ipv6ct_params =
+		(struct ipahal_imm_cmd_ip_v6_ct_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld))
+		return pyld;
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_ct_init *)pyld->data;
+
+	data->table_addr = ipv6ct_params->table_init.base_table_addr;
+	data->expansion_table_addr =
+		ipv6ct_params->table_init.expansion_table_addr;
+	data->table_index = ipv6ct_params->table_init.table_index;
+	data->table_addr_type =
+		ipv6ct_params->table_init.base_table_addr_shared ? 1 : 0;
+	data->expansion_table_addr_type =
+		ipv6ct_params->table_init.expansion_table_addr_shared ? 1 : 0;
+	data->size_base_table = ipv6ct_params->table_init.size_base_table;
+	data->size_expansion_table =
+		ipv6ct_params->table_init.size_expansion_table;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
+	struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
+		(struct ipahal_imm_cmd_ip_v6_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt6_params->hash_rules_addr;
+	data->hash_rules_size = flt6_params->hash_rules_size;
+	data->hash_local_addr = flt6_params->hash_local_addr;
+	data->nhash_rules_addr = flt6_params->nhash_rules_addr;
+	data->nhash_rules_size = flt6_params->nhash_rules_size;
+	data->nhash_local_addr = flt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
+	struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
+		(struct ipahal_imm_cmd_ip_v4_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt4_params->hash_rules_addr;
+	data->hash_rules_size = flt4_params->hash_rules_size;
+	data->hash_local_addr = flt4_params->hash_local_addr;
+	data->nhash_rules_addr = flt4_params->nhash_rules_addr;
+	data->nhash_rules_size = flt4_params->nhash_rules_size;
+	data->nhash_local_addr = flt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dummy(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	IPAHAL_ERR("no construct function for IMM_CMD=%s, IPA ver %d\n",
+		ipahal_imm_cmd_name_str(cmd), ipahal_ctx->hw_type);
+	WARN_ON(1);
+	return NULL;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for a
+ *  specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode
+ */
+struct ipahal_imm_cmd_obj {
+	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+		const void *params, bool is_atomic_ctx);
+	u16 opcode;
+};
+
+/*
+ * This table contains the info regarding each immediate command for IPAv3
+ *  and later: the opcode and the construct function.
+ * All the information on the IMMs for IPAv3 is statically defined below.
+ * If information is missing for some IMM on some IPA version,
+ *  the init function will fill it in with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * An opcode of -1 means that the IMM was removed on that
+ *  specific version.
+ */
+static struct ipahal_imm_cmd_obj
+		ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_filter_init,
+		3},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_filter_init,
+		4},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_nat_init,
+		5},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_routing_init,
+		7},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_routing_init,
+		8},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+		ipa_imm_cmd_construct_hdr_init_local,
+		9},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+		ipa_imm_cmd_construct_hdr_init_system,
+		10},
+	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write,
+		12},
+	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+		ipa_imm_cmd_construct_nat_dma,
+		14},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+		ipa_imm_cmd_construct_ip_packet_init,
+		16},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+		ipa_imm_cmd_construct_dma_task_32b_addr,
+		17},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem,
+		19},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+		ipa_imm_cmd_construct_ip_packet_tag_status,
+		20},
+
+	/* IPAv4 */
+	[IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write_v_4_0,
+		12},
+	/* NAT_DMA was renamed to TABLE_DMA for IPAv4 */
+	[IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = {
+		ipa_imm_cmd_construct_dummy,
+		-1},
+	[IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = {
+		ipa_imm_cmd_construct_table_dma_ipav4,
+		14},
+	[IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
+		19},
+	[IPA_HW_v4_0][IPA_IMM_CMD_IP_V6_CT_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_ct_init,
+		23}
+};
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ *  See ipahal_imm_cmd_objs[][] comments
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_imm_cmd_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+			if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_imm_cmd_obj))) {
+				memcpy(&ipahal_imm_cmd_objs[i+1][j],
+					&ipahal_imm_cmd_objs[i][j],
+					sizeof(struct ipahal_imm_cmd_obj));
+			} else {
+				/*
+				 * explicitly overridden immediate command.
+				 * Check validity
+				 */
+				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
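+/*
+ * Editorial illustration (not part of this snapshot) of the inheritance
+ * performed by ipahal_imm_cmd_init(): after running for IPA_HW_v4_0, any
+ * entry not explicitly overridden for IPAv4 is copied from IPAv3, e.g.
+ *
+ *	ipahal_imm_cmd_objs[IPA_HW_v4_0][IPA_IMM_CMD_HDR_INIT_LOCAL]
+ *
+ * ends up holding { ipa_imm_cmd_construct_hdr_init_local, 9 }, the IPAv3
+ * entry, while IPA_IMM_CMD_REGISTER_WRITE keeps its explicit v4.0 override.
+ */
+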
+/*
+ * ipahal_imm_cmd_name_str() - returns the string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+	if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+		return "Invalid IMM_CMD";
+	}
+
+	return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	/* the u16 opcode field stores a removed IMM's -1 as 0xFFFF */
+	if (opcode == (u16)(-1)) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return opcode;
+}
+
+/*
+ * ipahal_construct_imm_cmd() - Construct an immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command is allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it.
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%pK\n", params);
+		ipa_assert();
+		return NULL;
+	}
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+		return NULL;
+	}
+
+	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+	return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+		cmd, params, is_atomic_ctx);
+}
+
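+/*
+ * Illustrative usage (a sketch, not part of this snapshot; 'len',
+ * 'sram_ofst' and 'dma_addr' are placeholder values owned by the caller):
+ *
+ *	struct ipahal_imm_cmd_dma_shared_mem mem_cmd;
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	memset(&mem_cmd, 0, sizeof(mem_cmd));
+ *	mem_cmd.is_read = false;
+ *	mem_cmd.skip_pipeline_clear = false;
+ *	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ *	mem_cmd.size = len;
+ *	mem_cmd.local_addr = sram_ofst;
+ *	mem_cmd.system_addr = dma_addr;
+ *	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM,
+ *		&mem_cmd, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	... send pyld->opcode / pyld->data to the IPA cmd pipe ...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */
+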
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct an immediate command for No-Op
+ * The core driver may want to inject NOP commands to IPA,
+ *  e.g. to ensure pipeline clear before some other operation.
+ * The functionality given by this function can also be reached via
+ *  ipahal_construct_imm_cmd(); this function is a helper for the core driver
+ *  to reach the NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is the call made in atomic context or can it sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_register_write cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.skip_pipeline_clear = skip_pipline_clear;
+	cmd.pipeline_clear_options = pipline_clr_opt;
+	cmd.value_mask = 0x0;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&cmd, is_atomic_ctx);
+
+	if (!cmd_pyld)
+		IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+	return cmd_pyld;
+}
+
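+/*
+ * Illustrative usage (a sketch, not part of this snapshot): a No-Op
+ * command is typically placed ahead of other descriptors to guarantee
+ * the pipeline is clear before they execute:
+ *
+ *	struct ipahal_imm_cmd_pyld *nop;
+ *
+ *	nop = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ *	if (!nop)
+ *		return -ENOMEM;
+ *	... queue nop first, then the real commands ...
+ *	ipahal_destroy_imm_cmd(nop);
+ */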
+
+/* IPA Packet Status Logic */
+
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+	(status->status_mask |= \
+		((hw_status->ipa_pkt.status_mask & (__hw_bit_msk) ? 1 : 0) \
+					<< (__shft)))
+
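+/*
+ * Editorial illustration: for IPA_PKT_STATUS_SET_MSK(0x4,
+ * IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT) the macro above expands to
+ *
+ *	status->status_mask |=
+ *		((hw_status->ipa_pkt.status_mask & (0x4) ? 1 : 0)
+ *			<< (IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT));
+ *
+ * i.e. each H/W status_mask bit is tested and re-emitted at the shift
+ * position of the abstracted mask. It relies on 'status' and 'hw_status'
+ * being in scope at the expansion site.
+ */
+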
+static enum ipahal_pkt_status_exception pkt_status_parse_exception(
+	bool is_ipv6, u64 exception)
+{
+	enum ipahal_pkt_status_exception exception_type = 0;
+
+	switch (exception) {
+	case 0:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+		break;
+	case 1:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+		break;
+	case 4:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+		break;
+	case 8:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+		break;
+	case 16:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+		break;
+	case 32:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+		break;
+	case 64:
+		if (is_ipv6)
+			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT;
+		else
+			exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+		break;
+	case 229:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Exception type 0x%llx\n",
+			exception);
+		WARN_ON(1);
+	}
+
+	return exception_type;
+}
+
+static void __ipa_parse_gen_pkt(struct ipahal_pkt_status *status,
+				const void *unparsed_status)
+{
+	bool is_ipv6;
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+
+	is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
+	status->pkt_len = hw_status->ipa_pkt.pkt_len;
+	status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
+	status->endp_dest_idx = hw_status->ipa_pkt.endp_dest_idx;
+	status->metadata = hw_status->ipa_pkt.metadata;
+	status->flt_local = hw_status->ipa_pkt.flt_local;
+	status->flt_hash = hw_status->ipa_pkt.flt_hash;
+	status->flt_global = hw_status->ipa_pkt.flt_hash;
+	status->flt_ret_hdr = hw_status->ipa_pkt.flt_ret_hdr;
+	status->flt_miss = (hw_status->ipa_pkt.flt_rule_id ==
+			IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
+	status->flt_rule_id = hw_status->ipa_pkt.flt_rule_id;
+	status->rt_local = hw_status->ipa_pkt.rt_local;
+	status->rt_hash = hw_status->ipa_pkt.rt_hash;
+	status->ucp = hw_status->ipa_pkt.ucp;
+	status->rt_tbl_idx = hw_status->ipa_pkt.rt_tbl_idx;
+	status->rt_miss = (hw_status->ipa_pkt.rt_rule_id ==
+			IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID);
+	status->rt_rule_id = hw_status->ipa_pkt.rt_rule_id;
+	status->nat_hit = hw_status->ipa_pkt.nat_hit;
+	status->nat_entry_idx = hw_status->ipa_pkt.nat_entry_idx;
+	status->tag_info = hw_status->ipa_pkt.tag_info;
+	status->seq_num = hw_status->ipa_pkt.seq_num;
+	status->time_of_day_ctr = hw_status->ipa_pkt.time_of_day_ctr;
+	status->hdr_local = hw_status->ipa_pkt.hdr_local;
+	status->hdr_offset = hw_status->ipa_pkt.hdr_offset;
+	status->frag_hit = hw_status->ipa_pkt.frag_hit;
+	status->frag_rule = hw_status->ipa_pkt.frag_rule;
+	status->nat_type = hw_status->ipa_pkt.nat_type;
+
+	status->exception = pkt_status_parse_exception(is_ipv6,
+			hw_status->ipa_pkt.exception);
+}
+
+static void __ipa_parse_frag_pkt(struct ipahal_pkt_status *status,
+				const void *unparsed_status)
+{
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+
+	status->frag_rule_idx = hw_status->frag_pkt.frag_rule_idx;
+	status->tbl_idx = hw_status->frag_pkt.tbl_idx;
+	status->src_ip_addr = hw_status->frag_pkt.src_ip_addr;
+	status->dest_ip_addr = hw_status->frag_pkt.dest_ip_addr;
+	status->protocol = hw_status->frag_pkt.protocol;
+	status->ip_id = hw_status->frag_pkt.ip_id;
+	status->tlated_ip_addr = hw_status->frag_pkt.tlated_ip_addr;
+	status->ip_cksum_diff = hw_status->frag_pkt.ip_cksum_diff;
+	status->endp_src_idx = hw_status->frag_pkt.endp_src_idx;
+	status->endp_dest_idx = hw_status->frag_pkt.endp_dest_idx;
+	status->metadata = hw_status->frag_pkt.metadata;
+	status->seq_num = hw_status->frag_pkt.seq_num;
+	status->hdr_local = hw_status->frag_pkt.hdr_local;
+	status->hdr_offset = hw_status->frag_pkt.hdr_offset;
+	status->exception = hw_status->frag_pkt.exception;
+	status->nat_type = hw_status->frag_pkt.nat_type;
+}
+
+static void ipa_pkt_status_parse(
+	const void *unparsed_status, struct ipahal_pkt_status *status)
+{
+	enum ipahal_pkt_status_opcode opcode = 0;
+
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+
+
+	switch (hw_status->ipa_pkt.status_opcode) {
+	case 0x1:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
+		break;
+	case 0x2:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
+		break;
+	case 0x4:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
+		break;
+	case 0x8:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
+		break;
+	case 0x10:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
+		break;
+	case 0x20:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
+		break;
+	case 0x40:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
+		break;
+	default:
+		IPAHAL_ERR_RL("unsupported Status Opcode 0x%x\n",
+			hw_status->ipa_pkt.status_opcode);
+	}
+
+	status->status_opcode = opcode;
+
+	if (status->status_opcode == IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE)
+		__ipa_parse_frag_pkt(status, unparsed_status);
+	else
+		__ipa_parse_gen_pkt(status, unparsed_status);
+
+	switch (status->nat_type) {
+	case 0:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
+		break;
+	case 1:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
+		break;
+	case 2:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
+		break;
+	default:
+		IPAHAL_ERR_RL("unsupported Status NAT type 0x%x\n",
+			status->nat_type);
+	}
+
+	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x40,
+		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x100,
+		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x800,
+		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+	status->status_mask &= 0xFFFF;
+}
+
+/*
+ * ipa_pkt_status_parse_thin() - Parse only some of the packet status fields,
+ * for use in the LAN rx data path, where parsing is needed
+ * but only for specific fields.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from HW
+ * @status: Pointer to pre-allocated buffer where the parsed info will be
+ * stored
+ */
+static void ipa_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status)
+{
+	union ipa_pkt_status_hw *hw_status =
+		(union ipa_pkt_status_hw *)unparsed_status;
+	bool is_ipv6;
+
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+	/* dereference the H/W status only after validating the inputs */
+	is_ipv6 = (hw_status->ipa_pkt.status_mask & 0x80) ? false : true;
+
+	IPAHAL_DBG_LOW("Parse Thin Status Packet\n");
+	status->metadata = hw_status->ipa_pkt.metadata;
+	status->endp_src_idx = hw_status->ipa_pkt.endp_src_idx;
+	status->ucp = hw_status->ipa_pkt.ucp;
+	status->exception = pkt_status_parse_exception(is_ipv6,
+						hw_status->ipa_pkt.exception);
+}
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for a
+ *  specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ * @parse_thin: lightweight CB that parses only some of the fields for
+ *  data path optimization
+ */
+struct ipahal_pkt_status_obj {
+	u32 size;
+	void (*parse)(const void *unparsed_status,
+		struct ipahal_pkt_status *status);
+	void (*parse_thin)(const void *unparsed_status,
+			struct ipahal_pkt_status_thin *status);
+};
+
+/*
+ * This table contains the info regarding the packet status for IPAv3 and
+ *  later: the size of the packet status and the parsing functions.
+ * All the information on the pkt status for IPAv3 is statically defined below.
+ * If information is missing for some IPA version, the init function
+ *  will fill it in with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		IPA3_0_PKT_STATUS_SIZE,
+		ipa_pkt_status_parse,
+		ipa_pkt_status_parse_thin,
+		},
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ *  for the different IPA versions
+ *  See ipahal_pkt_status_objs[] comments
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	struct ipahal_pkt_status_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Since structure alignment is implementation dependent,
+	 * add a test to avoid different and incompatible data layouts.
+	 * If the test fails, it also means that ipahal_pkt_status_parse_thin
+	 * needs to be checked.
+	 *
+	 * In case new H/W has a different size or structure of status packet,
+	 * add a compile time validity check for it like the one below (as well
+	 * as the new defines and/or the new structure in the internal header).
+	 */
+	BUILD_BUG_ON(sizeof(union ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_pkt_status_obj))) {
+			memcpy(&ipahal_pkt_status_objs[i+1],
+				&ipahal_pkt_status_objs[i],
+				sizeof(struct ipahal_pkt_status_obj));
+		} else {
+			/*
+			 * explicitly overridden Packet Status info
+			 * Check validity
+			 */
+			if (!ipahal_pkt_status_objs[i+1].size) {
+				IPAHAL_ERR(
+				  "Packet Status with zero size ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_pkt_status_objs[i+1].parse) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_pkt_status_objs[i+1].parse_thin) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse_thin func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void)
+{
+	return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
+}
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status)
+{
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("Parse Status Packet\n");
+	memset(status, 0, sizeof(*status));
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
+		status);
+}
+
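+/*
+ * Illustrative usage (a sketch, not part of this snapshot; 'rx_buf' is a
+ * placeholder pointing at the status bytes prepended by IPA to an rx
+ * buffer):
+ *
+ *	struct ipahal_pkt_status status;
+ *
+ *	ipahal_pkt_status_parse(rx_buf, &status);
+ *	if (status.exception != IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+ *		IPAHAL_DBG("exception %s\n",
+ *			ipahal_pkt_status_exception_str(status.exception));
+ *	rx_buf += ipahal_pkt_status_get_size();
+ */
+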
+/*
+ * ipahal_pkt_status_parse_thin() - Similar to ipahal_pkt_status_parse(),
+ * except that it only parses some of the status packet fields,
+ * as a throughput (TP) optimization.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status)
+{
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%pK status=%pK\n",
+			unparsed_status, status);
+		return;
+	}
+	IPAHAL_DBG_LOW("Parse_thin Status Packet\n");
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse_thin(unparsed_status,
+				status);
+}
+
+/*
+ * ipahal_pkt_status_exception_str() - returns the string that represents the
+ *  exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception)
+{
+	if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
+		IPAHAL_ERR(
+			"requested string of invalid pkt_status exception=%d\n",
+			exception);
+		return "Invalid PKT_STATUS_EXCEPTION";
+	}
+
+	return ipahal_pkt_status_exception_to_str[exception];
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void ipahal_debugfs_init(void)
+{
+	ipahal_ctx->dent = debugfs_create_dir("ipahal", NULL);
+	if (IS_ERR_OR_NULL(ipahal_ctx->dent)) {
+		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(ipahal_ctx->dent);
+	ipahal_ctx->dent = NULL;
+}
+
+static void ipahal_debugfs_remove(void)
+{
+	if (!ipahal_ctx)
+		return;
+
+	if (IS_ERR(ipahal_ctx->dent)) {
+		IPAHAL_ERR("ipahal debugfs folder was not created\n");
+		return;
+	}
+
+	debugfs_remove_recursive(ipahal_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipahal_debugfs_init(void) {}
+static void ipahal_debugfs_remove(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * ipahal_cp_hdr_to_hw_buff_v3() - copy the header to the hardware buffer at
+ * the given base address and offset.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
+		u8 *const hdr, u32 hdr_len)
+{
+	memcpy(base + offset, hdr, hdr_len);
+}
+
+/* Header address update logic. */
+#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \
+	do { \
+		hdr_lsb = lower_32_bits(addr); \
+		hdr_msb = upper_32_bits(addr); \
+	} while (0)
+
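+/*
+ * Editorial illustration: for addr = 0x123456789ULL the macro above
+ * yields hdr_lsb = 0x23456789 and hdr_msb = 0x1.
+ */
+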
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy the processing context to the
+ * hardware buffer at the given base address and offset.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
+ */
+static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset,
+		u32 hdr_len, bool is_hdr_proc_ctx,
+		dma_addr_t phys_base, u64 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry,
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64)
+{
+	u64 hdr_addr;
+
+	if (type == IPA_HDR_PROC_NONE) {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 2;
+		ctx->hdr_add.tlv.value = hdr_len;
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%llx\n",
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
+		if (!is_64)
+			ctx->hdr_add.hdr_addr_hi = 0;
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) {
+		struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 2;
+		ctx->hdr_add.tlv.value = hdr_len;
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%llx\n",
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
+		if (!is_64)
+			ctx->hdr_add.hdr_addr_hi = 0;
+		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->l2tp_params.tlv.length = 1;
+		ctx->l2tp_params.tlv.value =
+				IPA_HDR_UCP_L2TP_HEADER_ADD;
+		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+			l2tp_params->hdr_add_param.eth_hdr_retained;
+		ctx->l2tp_params.l2tp_params.input_ip_version =
+			l2tp_params->hdr_add_param.input_ip_version;
+		ctx->l2tp_params.l2tp_params.output_ip_version =
+			l2tp_params->hdr_add_param.output_ip_version;
+
+		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) {
+		struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 2;
+		ctx->hdr_add.tlv.value = hdr_len;
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%llx length %d\n",
+			hdr_addr, ctx->hdr_add.tlv.value);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
+		if (!is_64)
+			ctx->hdr_add.hdr_addr_hi = 0;
+		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->l2tp_params.tlv.length = 1;
+		ctx->l2tp_params.tlv.value =
+				IPA_HDR_UCP_L2TP_HEADER_REMOVE;
+		ctx->l2tp_params.l2tp_params.hdr_len_remove =
+			l2tp_params->hdr_remove_param.hdr_len_remove;
+		ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+			l2tp_params->hdr_remove_param.eth_hdr_retained;
+		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid =
+			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size_valid;
+		ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size =
+			l2tp_params->hdr_remove_param.hdr_ofst_pkt_size;
+		ctx->l2tp_params.l2tp_params.hdr_endianness =
+			l2tp_params->hdr_remove_param.hdr_endianness;
+		IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n",
+			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid,
+			ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size);
+		IPAHAL_DBG("endianness: %d\n",
+			ctx->l2tp_params.l2tp_params.hdr_endianness);
+
+		IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	}  else if (type == IPA_HDR_PROC_ETHII_TO_ETHII_EX) {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *)
+			(base + offset);
+
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+
+		ctx->hdr_add_ex.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->hdr_add_ex.tlv.length = 1;
+		ctx->hdr_add_ex.tlv.value = IPA_HDR_UCP_ETHII_TO_ETHII_EX;
+
+		ctx->hdr_add_ex.params.input_ethhdr_negative_offset =
+			generic_params->input_ethhdr_negative_offset;
+		ctx->hdr_add_ex.params.output_ethhdr_negative_offset =
+			generic_params->output_ethhdr_negative_offset;
+		ctx->hdr_add_ex.params.reserved = 0;
+
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 2;
+		ctx->hdr_add.tlv.value = hdr_len;
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%llx\n",
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
+		if (!is_64)
+			ctx->hdr_add.hdr_addr_hi = 0;
+		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->cmd.length = 0;
+		switch (type) {
+		case IPA_HDR_PROC_ETHII_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_ETHII_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+			break;
+		case IPA_HDR_PROC_802_3_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_802_3_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+			break;
+		default:
+			IPAHAL_ERR("unknown ipa_hdr_proc_type %d\n", type);
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
+ * adding a header processing context, according to the type of the
+ * processing context.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
+{
+	int ret;
+
+	switch (type) {
+	case IPA_HDR_PROC_NONE:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq);
+		break;
+	case IPA_HDR_PROC_ETHII_TO_ETHII:
+	case IPA_HDR_PROC_ETHII_TO_802_3:
+	case IPA_HDR_PROC_802_3_TO_ETHII:
+	case IPA_HDR_PROC_802_3_TO_802_3:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_L2TP_HEADER_ADD:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_L2TP_HEADER_REMOVE:
+		ret =
+		sizeof(struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq);
+		break;
+	case IPA_HDR_PROC_ETHII_TO_ETHII_EX:
+		ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex);
+		break;
+	default:
+		/* invalid value to make sure failure */
+		IPAHAL_ERR_RL("invalid ipa_hdr_proc_type %d\n", type);
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/*
+ * struct ipahal_hdr_funcs - headers handling functions for specific IPA
+ * version
+ * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
+ */
+struct ipahal_hdr_funcs {
+	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
+			u8 *const hdr, u32 hdr_len);
+
+	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
+			void *const base, u32 offset, u32 hdr_len,
+			bool is_hdr_proc_ctx, dma_addr_t phys_base,
+			u64 hdr_base_addr,
+			struct ipa_hdr_offset_entry *offset_entry,
+			struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+			struct ipa_eth_II_to_eth_II_ex_procparams
+			*generic_params,
+			bool is_64);
+
+	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
+};
+
+static struct ipahal_hdr_funcs hdr_funcs;
+
+static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
+{
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	/*
+	 * Once there are changes in the HW that require a different case,
+	 * insert a new case for the new H/W. Always keep the default for the
+	 * latest HW and make sure all previously supported versions have
+	 * their cases.
+	 */
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	default:
+		hdr_funcs.ipahal_cp_hdr_to_hw_buff =
+				ipahal_cp_hdr_to_hw_buff_v3;
+		hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
+				ipahal_cp_proc_ctx_to_hw_buff_v3;
+		hdr_funcs.ipahal_get_proc_ctx_needed_len =
+				ipahal_get_proc_ctx_needed_len_v3;
+	}
+	IPAHAL_DBG("Exit\n");
+}
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy the header to the hardware buffer at
+ * the given base address and offset.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
+		u32 hdr_len)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+	IPAHAL_DBG("base %pK, offset %d, hdr %pK, hdr_len %d\n", base,
+			offset, hdr, hdr_len);
+	if (!base || !hdr_len || !hdr) {
+		IPAHAL_ERR("failed on validating params\n");
+		return;
+	}
+
+	hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
+
+	IPAHAL_DBG_LOW("Exit\n");
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy the processing context to the
+ * hardware buffer at the given base address and offset.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64)
+{
+	IPAHAL_DBG(
+		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, is_64 %d\n"
+			, type, base, offset, hdr_len, is_hdr_proc_ctx,
+			hdr_base_addr, offset_entry, is_64);
+
+	if (!base ||
+		!hdr_len ||
+		(is_hdr_proc_ctx && !phys_base) ||
+		(!is_hdr_proc_ctx && !offset_entry) ||
+		(!is_hdr_proc_ctx && !hdr_base_addr)) {
+		IPAHAL_ERR(
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			, hdr_len, &phys_base, hdr_base_addr
+			, is_hdr_proc_ctx, offset_entry);
+		return -EINVAL;
+	}
+
+	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
+			hdr_len, is_hdr_proc_ctx, phys_base,
+			hdr_base_addr, offset_entry, l2tp_params,
+			generic_params, is_64);
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
+ * adding a header processing context, according to the type of the
+ * processing context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
+{
+	int res;
+
+	IPAHAL_DBG("entry\n");
+
+	res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
+
+	IPAHAL_DBG("Exit\n");
+
+	return res;
+}
+
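+/*
+ * Illustrative usage (a sketch, not part of this snapshot; 'base', 'ofst',
+ * 'hdr_len', 'hdr_base_addr', 'offset_entry' and 'tbl_size' are
+ * placeholders owned by the caller):
+ *
+ *	int needed;
+ *
+ *	needed = ipahal_get_proc_ctx_needed_len(IPA_HDR_PROC_ETHII_TO_ETHII);
+ *	if (needed < 0 || ofst + needed > tbl_size)
+ *		return -EPERM;
+ *	ipahal_cp_proc_ctx_to_hw_buff(IPA_HDR_PROC_ETHII_TO_ETHII, base,
+ *		ofst, hdr_len, false, 0, hdr_base_addr, offset_entry,
+ *		NULL, NULL, true);
+ */
+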
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev)
+{
+	int result;
+
+	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%pK ipa_pdev=%pK\n",
+		ipa_hw_type, base, ipa_pdev);
+
+	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+	if (!ipahal_ctx) {
+		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+		result = -ENOMEM;
+		goto bail_err_exit;
+	}
+
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!base) {
+		IPAHAL_ERR("invalid memory io mapping addr\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!ipa_pdev) {
+		IPAHAL_ERR("invalid IPA platform device\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	ipahal_ctx->hw_type = ipa_hw_type;
+	ipahal_ctx->base = base;
+	ipahal_ctx->ipa_pdev = ipa_pdev;
+
+	if (ipahal_reg_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal reg\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_imm_cmd_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal imm cmd\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_pkt_status_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal pkt status\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_hdr_init(ipa_hw_type);
+
+	if (ipahal_fltrt_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal flt rt\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_hw_stats_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal hw stats\n");
+		result = -EFAULT;
+		goto bail_free_fltrt;
+	}
+
+	if (ipahal_nat_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal NAT\n");
+		result = -EFAULT;
+		goto bail_free_fltrt;
+	}
+
+	/* create an IPC buffer for the registers dump */
+	ipahal_ctx->regdumpbuf = ipc_log_context_create(IPAHAL_IPC_LOG_PAGES,
+		"ipa_regs", 0);
+	if (ipahal_ctx->regdumpbuf == NULL)
+		IPAHAL_ERR("failed to create IPA regdump log, continue...\n");
+
+	ipahal_debugfs_init();
+
+	return 0;
+
+bail_free_fltrt:
+	ipahal_fltrt_destroy();
+bail_free_ctx:
+	if (ipahal_ctx->regdumpbuf)
+		ipc_log_context_destroy(ipahal_ctx->regdumpbuf);
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+bail_err_exit:
+	return result;
+}
+
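+/*
+ * Illustrative init/teardown sequence (a sketch, not part of this
+ * snapshot; the mmio base and device pointer are placeholders obtained
+ * by the core driver during probe):
+ *
+ *	if (ipahal_init(IPA_HW_v4_0, ipa_mmio_base, &pdev->dev))
+ *		goto fail_hal;
+ *	...
+ *	ipahal_destroy();
+ */
+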
+void ipahal_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+	ipahal_fltrt_destroy();
+	ipahal_debugfs_remove();
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+}
+
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+	if (likely(mem)) {
+		dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+			mem->phys_base);
+		mem->size = 0;
+		mem->base = NULL;
+		mem->phys_base = 0;
+	}
+}

+ 701 - 0
ipa/ipa_v3/ipahal/ipahal.h

@@ -0,0 +1,701 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include "../ipa_defs.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * Immediate command names
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ *	ipahal_imm_cmd_name_to_str array as well.
+ */
+enum ipahal_imm_cmd_name {
+	IPA_IMM_CMD_IP_V4_FILTER_INIT,
+	IPA_IMM_CMD_IP_V6_FILTER_INIT,
+	IPA_IMM_CMD_IP_V4_NAT_INIT,
+	IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+	IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+	IPA_IMM_CMD_HDR_INIT_LOCAL,
+	IPA_IMM_CMD_HDR_INIT_SYSTEM,
+	IPA_IMM_CMD_REGISTER_WRITE,
+	IPA_IMM_CMD_NAT_DMA,
+	IPA_IMM_CMD_IP_PACKET_INIT,
+	IPA_IMM_CMD_DMA_SHARED_MEM,
+	IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+	IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+	IPA_IMM_CMD_TABLE_DMA,
+	IPA_IMM_CMD_IP_V6_CT_INIT,
+	IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+	u64 hash_rules_addr;
+	u64 nhash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+	u64 hash_rules_addr;
+	u64 nhash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_ipv6ct_init_common - NAT/IPv6CT table init command
+ *                                                common part
+ * @base_table_addr: Address in sys/shared mem where the base table starts
+ * @expansion_table_addr: Address in sys/shared mem where the expansion table
+ *  starts. Entries that result in hash collision are located in this table.
+ * @base_table_addr_shared: base_table_addr in shared mem (if not, then sys)
+ * @expansion_table_addr_shared: expansion_table_addr in
+ *  shared mem (if not, then sys)
+ * @size_base_table: Num of entries in the base table
+ * @size_expansion_table: Num of entries in the expansion table
+ * @table_index: For future support of multiple tables
+ */
+struct ipahal_imm_cmd_nat_ipv6ct_init_common {
+	u64 base_table_addr;
+	u64 expansion_table_addr;
+	bool base_table_addr_shared;
+	bool expansion_table_addr_shared;
+	u16 size_base_table;
+	u16 size_expansion_table;
+	u8 table_index;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @table_init: table initialization parameters
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ *  to NAT table starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ *  shared mem (if not, then sys)
+ * @public_addr_info: Public IP addresses info, depending on the IPA H/W version:
+ *                    IPA H/W >= 4.0 - PDN config table offset in SMEM
+ *                    IPA H/W < 4.0  - The public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+	struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
+	u64 index_table_addr;
+	u64 index_table_expansion_addr;
+	bool index_table_addr_shared;
+	bool index_table_expansion_addr_shared;
+	u32 public_addr_info;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT cmd payload
+ * Inits IPv6CT block. Initiates the IPv6CT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @table_init: table initialization parameters
+ */
+struct ipahal_imm_cmd_ip_v6_ct_init {
+	struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+	u64 hash_rules_addr;
+	u64 nhash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+	u64 hash_rules_addr;
+	u64 nhash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+	u64 hdr_table_addr;
+	u32 size_hdr_table;
+	u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+	u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload
+ * Perform DMA operation on NAT and IPV6 connection tracking related mem
+ * addresses. Copy data into different locations within IPv6CT and NAT
+ * associated tbls. (For add/remove NAT rules)
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ */
+struct ipahal_imm_cmd_table_dma {
+	u32 offset;
+	u16 data;
+	u8 table_index;
+	u8 base_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for a specific IP pkt. Shall be sent prior to the IP pkt's
+ *  data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+	u32 destination_pipe_index;
+};
+
+/*
+ * enum ipahal_pipeline_clear_option - Values for pipeline clear waiting options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ *  shall not be serviced until HPS is clear of packets or immediate commands.
+ *  The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ *  (for no packet contexts allocated to the originating source group).
+ *  The source group / Rx queue shall not be serviced until all previously
+ *  allocated packet contexts are released. All other source groups/queues shall
+ *  be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ *  All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ *  clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+	IPAHAL_HPS_CLEAR,
+	IPAHAL_SRC_GRP_CLEAR,
+	IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+	u32 offset;
+	u32 value;
+	u32 value_mask;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @system_addr: Address in system memory
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @clear_after_read: Clear local memory at the end of a read operation;
+ *  allows atomic read and clear if HPS is clear. Ignored for writes.
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+	u64 system_addr;
+	u32 size;
+	u32 local_addr;
+	bool clear_after_read;
+	bool is_read;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+	u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The Opcode is dynamic, where it holds the number of buffers to process
+ * @cmplt: Complete flag: If true, IPA interrupts SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts EOT to the
+ *  dest client. This is used for the aggr sequence
+ * @flsh: Flush flag: If true, the pkt will go through the IPA blocks but
+ *  will not be xfered to the dest client but rather will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ *  servicing the current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). The first DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+	bool cmplt;
+	bool eof;
+	bool flsh;
+	bool lock;
+	bool unlock;
+	u32 size1;
+	u32 addr1;
+	u32 packet_size;
+};
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @opcode: opcode of the immediate command
+ * @data: buffer containing the immediate command payload. The buffer is
+ *  laid out back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+	u16 len;
+	u16 opcode;
+	u8 data[0];
+};
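+
+/*
+ * Layout note (a sketch of what the construction functions produce): the
+ * payload buffer is allocated together with this struct, so the H/W bytes
+ * live at pyld->data and span pyld->len bytes right after the header fields.
+ */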
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns a string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds the imm cmd payload that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-OP
+ * Core driver may want functionality to inject NOP commands to IPA
+ *  to ensure e.g., PIPELINE clear before some other operation.
+ * The functionality given by this function can be reached by
+ *  ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ *  to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release the payload that was built
+ *  by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+	kfree(pyld);
+}
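+
+/*
+ * A minimal usage sketch of the construct/destroy pair (illustrative;
+ * reg_ofst/val are hypothetical and error handling is trimmed):
+ *
+ *	struct ipahal_imm_cmd_register_write cmd = { 0 };
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	cmd.offset = reg_ofst;
+ *	cmd.value = val;
+ *	cmd.value_mask = ~0;
+ *	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ *	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ *		&cmd, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	...queue pyld->data (pyld->len bytes) towards IPA...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */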
+
+
+/* IPA Status packet Structures and Function APIs */
+
+/*
+ * enum ipahal_pkt_status_opcode - Packet Status Opcode
+ * @IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as
+ *  part of IPA second processing pass for a packet (i.e. IPA XLAT
+ *  processing for the translated packet).
+ */
+enum ipahal_pkt_status_opcode {
+	IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
+	IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
+	IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_LOG,
+	IPAHAL_PKT_STATUS_OPCODE_DCMP,
+	IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
+};
+
+/*
+ * enum ipahal_pkt_status_exception - Packet Status exception type
+ * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
+ *
+ * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions mean that
+ *  partial / no IP processing took place and corresponding Status Mask
+ *  fields should be ignored. Flt and rt info is not valid.
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ *	ipahal_pkt_status_exception_to_str array as well.
+ */
+enum ipahal_pkt_status_exception {
+	IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
+	IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
+	IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
+	IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
+	IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+	/*
+	 * NAT and IPv6CT have the same value at HW.
+	 * NAT for IPv4 and IPv6CT for IPv6 exceptions
+	 */
+	IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+	IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT,
+	IPAHAL_PKT_STATUS_EXCEPTION_CSUM,
+	IPAHAL_PKT_STATUS_EXCEPTION_MAX,
+};
+
+/*
+ * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
+ *  the contained flags. This bitmask indicates flags on the properties of
+ *  the packet as well as the IPA processing it may have had.
+ * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
+ *  Also means the frag info is valid unless exception or first frag
+ * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
+ *  Also means that flt info is valid.
+ * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
+ *  Also means that NAT info is valid, unless exception.
+ * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
+ *  Also means that rt info is valid, unless exception.
+ * @TAG_VALID: Flag specifying whether TAG and TAG info are valid
+ * @FRAGMENT: Flag specifying if pkt is IP fragment.
+ * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
+ *  info is invalid
+ * @V4: Flag specifying whether pkt is IPv4 (set) or IPv6 (clear)
+ * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
+ *  If so, csum trailer exists
+ * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
+ * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
+ * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
+ *  block?
+ * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
+ * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
+ * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
+ *  aggr hard-byte-limit
+ * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
+ */
+enum ipahal_pkt_status_mask {
+	IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
+	IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_V4_SHFT,
+	IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
+	IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
+};
+
+/*
+ * Returns a boolean value representing a property of the packet.
+ * @__flag_shft: The shift value of the flag of the status bitmask of
+ *  the needed property. See enum ipahal_pkt_status_mask
+ * @__status: Pointer to abstracted status structure
+ */
+#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
+	(((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
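+
+/*
+ * Usage sketch (illustrative): testing whether the csum block processed a
+ * parsed status, so a csum trailer is expected:
+ *
+ *	if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ *		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status))
+ *		...csum trailer present...
+ */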
+
+/*
+ * enum ipahal_pkt_status_nat_type - Type of NAT
+ */
+enum ipahal_pkt_status_nat_type {
+	IPAHAL_PKT_STATUS_NAT_NONE,
+	IPAHAL_PKT_STATUS_NAT_SRC,
+	IPAHAL_PKT_STATUS_NAT_DST,
+};
+
+/*
+ * struct ipahal_pkt_status - IPA status packet abstracted payload.
+ *  This structure describes the status packet fields for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @tag_info: S/W defined value provided via immediate command
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: The first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask for flags on several properties of the packet and
+ *  the processing it may have passed at IPA. See enum ipahal_pkt_status_mask
+ * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ *  Starting IPA4.5, this will be true only if the packet has an L2 header.
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ *   In case of a miss, all flt info is to be ignored
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  an rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_type: Defines the type of the NAT operation:
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table resides in local memory? (If no, then system mem)
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ *  This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ *  can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @hdr_offset: Offset of used header in the header table
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @seq_num: Per source endp unique packet sequence number
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @frag_rule_idx: Frag rule index value.
+ * @tbl_idx: Table index valid or not.
+ * @src_ip_addr: Source packet IP address.
+ * @dest_ip_addr: Destination packet IP address.
+ * @protocol: Protocol number.
+ * @ip_id: IP packet IP ID number.
+ * @tlated_ip_addr: Translated IP address.
+ * @ip_cksum_diff: IP packet checksum difference.
+ */
+struct ipahal_pkt_status {
+	u64 tag_info;
+	enum ipahal_pkt_status_opcode status_opcode;
+	enum ipahal_pkt_status_exception exception;
+	u32 status_mask;
+	u32 pkt_len;
+	u32 metadata;
+	bool flt_local;
+	bool flt_hash;
+	bool flt_global;
+	bool flt_ret_hdr;
+	bool flt_miss;
+	bool rt_local;
+	bool rt_hash;
+	bool ucp;
+	bool rt_miss;
+	bool nat_hit;
+	enum ipahal_pkt_status_nat_type nat_type;
+	u32 time_of_day_ctr;
+	bool hdr_local;
+	bool frag_hit;
+	u16 flt_rule_id;
+	u16 rt_rule_id;
+	u16 nat_entry_idx;
+	u16 hdr_offset;
+	u8 endp_src_idx;
+	u8 endp_dest_idx;
+	u8 rt_tbl_idx;
+	u8 seq_num;
+	u8 frag_rule;
+	u8 frag_rule_idx;
+	bool tbl_idx;
+	u32 src_ip_addr;
+	u32 dest_ip_addr;
+	u8 protocol;
+	u16 ip_id;
+	u32 tlated_ip_addr;
+	u16 ip_cksum_diff;
+};
+
+/*
+ * struct ipahal_pkt_status_thin - this struct is used to parse only
+ *  a few fields from the status packet, needed for LAN optimization.
+ * @exception: The first exception that took place.
+ * @metadata: meta data value used by packet
+ * @endp_src_idx: Source end point index.
+ * @ucp: UC Processing flag
+ */
+struct ipahal_pkt_status_thin {
+	enum ipahal_pkt_status_exception exception;
+	u32 metadata;
+	u8 endp_src_idx;
+	bool ucp;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_parse_thin() - Parse only some of the packet status
+ * fields, for use in the LAN rx data path where just a few fields are
+ * needed.
+ * @unparsed_status: Pointer to H/W format of the packet status as read from HW
+ * @status: Pointer to pre-allocated buffer where the parsed info will be
+ * stored
+ */
+void ipahal_pkt_status_parse_thin(const void *unparsed_status,
+	struct ipahal_pkt_status_thin *status);
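+
+/*
+ * A minimal parsing sketch (illustrative; rx_buf/rx_len stand for a
+ * hypothetical buffer holding one H/W status as read from the pipe):
+ *
+ *	struct ipahal_pkt_status status;
+ *
+ *	if (rx_len < ipahal_pkt_status_get_size())
+ *		return -EINVAL;
+ *	ipahal_pkt_status_parse(rx_buf, &status);
+ *	if (status.exception != IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+ *		IPAHAL_DBG("exception %s\n",
+ *			ipahal_pkt_status_exception_str(status.exception));
+ */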
+
+/*
+ * ipahal_pkt_status_exception_str() - returns a string representing the
+ *  exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception);
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to hardware
+ * buffer according to the base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u64 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry,
+		struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+		struct ipa_eth_II_to_eth_II_ex_procparams *generic_params,
+		bool is_64);
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
+ * of header processing context according to the type of processing context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev);
+void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
+
+#endif /* _IPAHAL_H_ */

+ 4366 - 0
ipa/ipa_v3/ipahal/ipahal_fltrt.c

@@ -0,0 +1,4366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa.h>
+#include <linux/errno.h>
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+#define IPA_MAC_FLT_BITS (IPA_FLT_MAC_DST_ADDR_ETHER_II | \
+		IPA_FLT_MAC_SRC_ADDR_ETHER_II | IPA_FLT_MAC_DST_ADDR_802_3 | \
+		IPA_FLT_MAC_SRC_ADDR_802_3 | IPA_FLT_MAC_DST_ADDR_802_1Q | \
+		IPA_FLT_MAC_SRC_ADDR_802_1Q)
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Are hashable tables supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Rule ID bit length
+ * @rule_buf_size: Max size rule may utilize.
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given raw table address, create H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the supported equations.
+ *	0xFF means the equation is not supported
+ */
+struct ipahal_fltrt_obj {
+	bool support_hash;
+	u32 tbl_width;
+	u32 sysaddr_alignment;
+	u32 lcladdr_alignment;
+	u32 blk_sz_alignment;
+	u32 rule_start_alignment;
+	u32 tbl_hdr_width;
+	u32 tbl_addr_mask;
+	int rule_max_prio;
+	int rule_min_prio;
+	u32 low_rule_id;
+	u32 rule_id_bit_len;
+	u32 rule_buf_size;
+	u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+	u64 (*create_flt_bitmap)(u64 ep_bitmap);
+	u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+	void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+	int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_eq)(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+	int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+	int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+	u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	/* At IPA3, global configuration is possible but not used */
+	return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+	if (is_sys) {
+		if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+			IPAHAL_ERR(
+				"sys addr is not aligned accordingly addr=0x%pad\n",
+				&addr);
+			ipa_assert();
+			return 0;
+		}
+	} else {
+		if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+			IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+				addr);
+			ipa_assert();
+			return 0;
+		}
+		/*
+		 * for local tables (at sram) offsets are used as table
+		 * addresses. The offset needs to be in 8B units
+		 * (local address aligned) and left shifted to its place.
+		 * The local bit needs to be enabled.
+		 */
+		addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+		addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		addr += 1;
+	}
+
+	return addr;
+}
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+	IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr);
+
+	*is_sys = !(hwaddr & 0x1);
+	hwaddr &= ~0x1ULL;
+	if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+		IPAHAL_ERR(
+			"sys addr is not aligned accordingly addr=0x%pad\n",
+			&hwaddr);
+		ipa_assert();
+		return;
+	}
+
+	if (!*is_sys) {
+		hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+	}
+
+	*addr = hwaddr;
+}
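+
+/*
+ * Worked example of the lcl-addr encoding above (a sketch; it assumes the
+ * IPAv3 constants encode 8B-aligned sram offsets starting at bit 7, with
+ * bit 0 as the local flag, i.e. IPA3_0_HW_TBL_LCLADDR_ALIGNMENT == 7 and
+ * IPA3_0_HW_TBL_ADDR_MASK == 127):
+ *
+ *	encode: ofst 0x50 -> (0x50 / 8) * 128 + 1 = 0x501
+ *	decode: 0x501 -> bit0 set => local; (0x500 / 128) * 8 = ofst 0x50
+ */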
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+		struct ipahal_rt_rule_entry *rule);
+static int ipa_rt_parse_hw_rule_ipav4_5(u8 *addr,
+		struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+		struct ipahal_flt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
+		struct ipahal_flt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+	(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+	(BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
+
+#define IPA_IS_RULE_EQ_VALID(__eq) \
+	(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)] != 0xFF)
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity of the rule
+ *  attribs before starting to build it.
+ *  Checks that ipv4 attribs are not used on an ipv6 rule and vice-versa.
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+	enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+	if (ipt == IPA_IP_v4) {
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC ||
+		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			IPAHAL_ERR_RL("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAHAL_ERR_RL("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	return 0;
+}
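+
+/*
+ * For example (illustrative), a v4 rule whose attrib_mask carries
+ * IPA_FLT_TC is rejected by the check above, since TC is an
+ * IPv6-only attribute.
+ */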
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
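+
+/*
+ * Layout produced above (a sketch): an 8B ipa3_0_rt_rule_hw_hdr committed
+ * via ipa_write_64, immediately followed by the equation body generated by
+ * ipa_fltrt_generate_hw_rule_bdy(); en_rule in the header records which
+ * equations the body carries.
+ */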
+
+static int ipa_rt_gen_hw_rule_ipav4_5(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_5_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_5_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+	rule_hdr->u.hdr.stats_cnt_idx_lsb = params->cnt_idx & 0x3F;
+	rule_hdr->u.hdr.stats_cnt_idx_msb = (params->cnt_idx & 0xC0) >> 6;
+
+	buf += sizeof(struct ipa4_5_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR_RL("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	rule_hdr->u.hdr.rsvd1 = 0;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR_RL("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR_RL("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR_RL("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+
+	ipa_assert_on(params->rule->pdn_idx & ~0xF);
+	rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx;
+	rule_hdr->u.hdr.set_metadata = params->rule->set_metadata;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa4_0_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id,
+		rule_hdr->u.hdr.pdn_idx,
+		rule_hdr->u.hdr.set_metadata);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_gen_hw_rule_ipav4_5(
+	struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_5_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_5_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON_RATELIMIT_IPA(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+
+	ipa_assert_on(params->rule->pdn_idx & ~0xF);
+	rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx;
+	rule_hdr->u.hdr.set_metadata = params->rule->set_metadata;
+	rule_hdr->u.hdr.rsvd2 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+	rule_hdr->u.hdr.stats_cnt_idx_lsb = params->cnt_idx & 0x3F;
+	rule_hdr->u.hdr.stats_cnt_idx_msb = (params->cnt_idx & 0xC0) >> 6;
+
+	buf += sizeof(struct ipa4_5_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id,
+		rule_hdr->u.hdr.pdn_idx,
+		rule_hdr->u.hdr.set_metadata);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information for IPAv3 is statically defined below.
+ * If information is missing for some IPA version,
+ *  the init function will fill it with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule,
+		{
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+			[IPA_IS_PURE_ACK]		= 0xFF,
+		},
+	},
+
+	/* IPAv4 */
+	[IPA_HW_v4_0] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule_ipav4,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule_ipav4,
+		{
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+			[IPA_IS_PURE_ACK]		= 0xFF,
+		},
+	},
+
+	/* IPAv4.2 */
+	[IPA_HW_v4_2] = {
+		false,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule_ipav4,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule_ipav4,
+		{
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+			[IPA_IS_PURE_ACK]		= 0xFF,
+		},
+	},
+
+	/* IPAv4.5 */
+	[IPA_HW_v4_5] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule_ipav4_5,
+		ipa_flt_gen_hw_rule_ipav4_5,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule_ipav4_5,
+		ipa_flt_parse_hw_rule_ipav4_5,
+		{
+			[IPA_TOS_EQ]			= 0xFF,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+			[IPA_IS_PURE_ACK]		= 0,
+		},
+	},
+};
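+
+/*
+ * Illustrative sketch of the gap-filling described above (the actual copy
+ * runs in the fltrt init code, not here; obj_is_all_zero() is a stand-in
+ * for however "all fields are 0" is detected):
+ *
+ *	for (i = IPA_HW_v3_0 + 1; i < IPA_HW_MAX; i++) {
+ *		if (obj_is_all_zero(&ipahal_fltrt_objs[i]))
+ *			ipahal_fltrt_objs[i] = ipahal_fltrt_objs[i - 1];
+ *	}
+ */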
+
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+		return -EPERM;
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return  -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		eq_atrb->rule_eq_bitmap = 0;
+		eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_OFFSET_MEQ32_0);
+		eq_atrb->offset_meq_32[0].offset = 0;
+		eq_atrb->offset_meq_32[0].mask = 0;
+		eq_atrb->offset_meq_32[0].value = 0;
+	}
+
+	return 0;
+}
+
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+	u8 hdr_mac_addr_offset,
+	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN])
+{
+	int i;
+
+	*extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+	/* LSB MASK and ADDR */
+	*rest = ipa_write_64(0, *rest);
+	*rest = ipa_write_64(0, *rest);
+
+	/* MSB MASK and ADDR */
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr_mask[i], *rest);
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr[i], *rest);
+}
+
+static inline void ipa_fltrt_get_mac_data(const struct ipa_rule_attrib *attrib,
+	uint32_t attrib_mask, u8 *offset, const uint8_t **mac_addr,
+	const uint8_t **mac_addr_mask)
+{
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		*offset = -14;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		*offset = -8;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		*offset = -22;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		*offset = -16;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q) {
+		*offset = -18;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q) {
+		*offset = -10;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+}
+
+static int ipa_fltrt_generate_mac_hw_rule_bdy(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 *ofst_meq128, u8 **extra, u8 **rest)
+{
+	u8 offset = 0;
+	const uint8_t *mac_addr = NULL;
+	const uint8_t *mac_addr_mask = NULL;
+	int i;
+	uint32_t attrib_mask;
+
+	for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+		switch (i) {
+		case 0:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+			break;
+		case 1:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+			break;
+		case 2:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+			break;
+		case 3:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+			break;
+		case 4:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+			break;
+		case 5:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+			break;
+		default:
+			return -EPERM;
+		}
+
+		attrib_mask &= attrib->attrib_mask;
+		if (!attrib_mask)
+			continue;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[*ofst_meq128]);
+
+		ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+			&mac_addr, &mac_addr_mask);
+
+		ipa_fltrt_generate_mac_addr_hw_rule(extra, rest, offset,
+			mac_addr_mask,
+			mac_addr);
+
+		(*ofst_meq128)++;
+	}
+
+	return 0;
+}
+
+static inline int ipa_fltrt_generate_vlan_hw_rule_bdy(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 *ofst_meq32, u8 **extra, u8 **rest)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		*extra = ipa_write_8((u8)-6, *extra);
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		*rest = ipa_write_32(0xFFFF0FFF, *rest);
+		*rest = ipa_write_32(vlan_tag, *rest);
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
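+
+/*
+ * Worked example (illustrative): for vlan_id 5 the meq32 above compares
+ * the 32b word at L2 offset -6 against 0x81000005 under mask 0xFFFF0FFF,
+ * i.e. the TPID must be 0x8100 and the 12b VID must be 5, while the
+ * PCP/DEI bits are ignored.
+ */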
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+	bool tos_done = false;
+
+	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) {
+			IPAHAL_ERR("is_pure_ack eq not supported\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK);
+		extra = ipa_write_8(0, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) {
+			IPAHAL_DBG("tos eq not supported\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+			extra = ipa_write_8(attrib->u.v4.tos, extra);
+			tos_done = true;
+		}
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v4.protocol, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+			&ofst_meq128, &extra, &rest))
+			goto err;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 0 => Take the first word. offset of TOS in v4 header is 1 */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32((attrib->tos_mask << 16), rest);
+		rest = ipa_write_32((attrib->tos_value << 16), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src ip in v4 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst ip in v4 header */
+		extra = ipa_write_8(16, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_DBG("ran out of meq32 eq\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+				ipa3_0_ofst_meq32[ofst_meq32]);
+			/*
+			 * 0 => Take the first word.
+			 * offset of TOS in v4 header is 1
+			 */
+			extra = ipa_write_8(0, extra);
+			rest = ipa_write_32(0xFF << 16, rest);
+			rest = ipa_write_32((attrib->u.v4.tos << 16), rest);
+			ofst_meq32++;
+			tos_done = true;
+		}
+	}
+
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v4 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate first ihl meq eq */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+		/* populate second ihl meq eq */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 12  => offset of SYN after v4 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(0x20000, rest);
+		rest = ipa_write_32(0x20000, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_DBG("ran out of ihl_meq32 eq\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+				ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+			/*
+			 * 0 => Take the first word. offset of TOS in
+			 * v4 header is 1. MSB bit asserted at IHL means
+			 * to ignore packet IHL and do offset inside IPA header
+			 */
+			extra = ipa_write_8(0x80, extra);
+			rest = ipa_write_32(0xFF << 16, rest);
+			rest = ipa_write_32((attrib->u.v4.tos << 16), rest);
+			ihl_ofst_meq32++;
+			tos_done = true;
+		}
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		IPAHAL_ERR("could not find equation for tos\n");
+		goto err;
+	}
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
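+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): matching
+ * dst IP 192.168.1.0/24 and dst port 8080 consumes one ofst_meq32
+ * equation (offset 16 into the v4 header) and one ihl_ofst_rng16
+ * equation (offset 2 past the v4 header):
+ *
+ *	struct ipa_rule_attrib attrib = { 0 };
+ *
+ *	attrib.attrib_mask = IPA_FLT_DST_ADDR | IPA_FLT_DST_PORT;
+ *	attrib.u.v4.dst_addr = 0xC0A80100;
+ *	attrib.u.v4.dst_addr_mask = 0xFFFFFF00;
+ *	attrib.dst_port = 8080;
+ */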
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	/* v6 code below assumes no extension headers; TODO: fix this */
+	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) {
+			IPAHAL_ERR("is_pure_ack eq not supported\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK);
+		extra = ipa_write_8(0, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+		extra = ipa_write_8(attrib->u.v6.tc, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 8 => offset of src ip in v6 header */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 24 => offset of dst ip in v6 header */
+		extra = ipa_write_8(24, extra);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 0 => offset of TOS in v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_mask << 20), rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_value << 20), rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+			&ofst_meq128, &extra, &rest))
+			goto err;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (ipa_fltrt_generate_vlan_hw_rule_bdy(en_rule, attrib, &ofst_meq32,
+		&extra, &rest))
+		goto err;
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v6 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v6 header FIXME */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate first ihl meq eq */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+		/* populate second ihl meq eq */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+		rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 12  => offset of SYN after v6 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(0x20000, rest);
+		rest = ipa_write_32(0x20000, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+
+		/* populate TCP protocol eq */
+		if (attrib->ether_type == 0x0800) {
+			extra = ipa_write_8(30, extra);
+			rest = ipa_write_32(0xFF0000, rest);
+			rest = ipa_write_32(0x60000, rest);
+		} else {
+			extra = ipa_write_8(26, extra);
+			rest = ipa_write_32(0xFF00, rest);
+			rest = ipa_write_32(0x600, rest);
+		}
+
+		/* populate TCP SYN eq */
+		if (attrib->ether_type == 0x0800) {
+			extra = ipa_write_8(54, extra);
+			rest = ipa_write_32(0x20000, rest);
+			rest = ipa_write_32(0x20000, rest);
+		} else {
+			extra = ipa_write_8(74, extra);
+			rest = ipa_write_32(0x20000, rest);
+			rest = ipa_write_32(0x20000, rest);
+		}
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of IP type after v6 header */
+		extra = ipa_write_8(22, extra);
+		rest = ipa_write_32(0xF0000000, rest);
+		if (attrib->type == 0x40)
+			rest = ipa_write_32(0x40000000, rest);
+		else
+			rest = ipa_write_32(0x60000000, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		extra = ipa_write_8(38, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/*
+		 * 20 => offset of the inner Ethertype after the v6 header;
+		 * for inner IPv4 (ether_type 0x0800) match the version/IHL
+		 * byte (0x45) at offset 21 instead
+		 */
+		if (attrib->ether_type == 0x0800) {
+			extra = ipa_write_8(21, extra);
+			rest = ipa_write_16(0x0045, rest);
+			rest = ipa_write_16(0x0045, rest);
+		} else {
+			extra = ipa_write_8(20, extra);
+			rest = ipa_write_16(attrib->ether_type, rest);
+			rest = ipa_write_16(attrib->ether_type, rest);
+		}
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+			rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+	while (cnt--)
+		*dst++ = *src++;
+
+	return dst;
+}
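+
+/*
+ * ipa_fltrt_copy_mem() behaves like memcpy() over 'cnt' bytes but
+ * returns dst + cnt, so the caller below can advance its write cursor
+ * in one assignment: *buf = ipa_fltrt_copy_mem(src, *buf, sz);
+ */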
+
+/**
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ipt: IP family (IPA_IP_v4 or IPA_IP_v6)
+ * @attrib: IPA rule attribute
+ * @buf: output buffer; advanced past the generated rule on return
+ * @en_rule: bitmap of the rule equations enabled while building
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ * -ENOMEM: allocation failure
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	int sz;
+	int rc = 0;
+	u8 *extra_wrd_buf;
+	u8 *rest_wrd_buf;
+	u8 *extra_wrd_start;
+	u8 *rest_wrd_start;
+	u8 *extra_wrd_i;
+	u8 *rest_wrd_i;
+
+	sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!extra_wrd_buf) {
+		rc = -ENOMEM;
+		goto fail_extra_alloc;
+	}
+
+	sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!rest_wrd_buf) {
+		rc = -ENOMEM;
+		goto fail_rest_alloc;
+	}
+
+	extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_start = (u8 *)((long)extra_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_start = (u8 *)((long)rest_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
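+	/*
+	 * Alignment sketch (assuming IPA3_0_HW_RULE_START_ALIGNMENT is a
+	 * mask-style constant such as 7): adding the mask and then clearing
+	 * its bits rounds the pointer up within the over-allocated buffer,
+	 * e.g. 0x1003 + 7 = 0x100A and 0x100A & ~7 = 0x1008 (8-byte
+	 * aligned).
+	 */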
+
+	extra_wrd_i = extra_wrd_start;
+	rest_wrd_i = rest_wrd_start;
+
+	rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+	if (rc) {
+		IPAHAL_ERR_RL("rule generation err check failed\n");
+		goto fail_err_check;
+	}
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR_RL("failed to build ipv4 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR_RL("failed to build ipv6 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+	} else {
+		IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
+		rc = -EPERM;
+		goto fail_err_check;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		IPAHAL_DBG_LOW("building default rule\n");
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+		extra_wrd_i = ipa_write_8(0, extra_wrd_i);  /* offset */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* mask */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* val */
+	}
+
+	IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG_LOW("extra_word_2 0x%llx\n",
+		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+	sz = extra_wrd_i - extra_wrd_start;
+	IPAHAL_DBG_LOW("extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+	sz = rest_wrd_i - rest_wrd_start;
+	IPAHAL_DBG_LOW("non extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+	kfree(rest_wrd_buf);
+fail_rest_alloc:
+	kfree(extra_wrd_buf);
+fail_extra_alloc:
+	return rc;
+}
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - calculate the number of extra-word
+ * bytes needed by the given equations
+ * @attrib: equation attribute
+ *
+ * Return value: the number of extra-word bytes (one byte per equation
+ * carrying an extra-word parameter)
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+	const struct ipa_ipfltri_rule_eq *attrib)
+{
+	int num = 0;
+
+	/*
+	 * tos_eq_present field has two meanings:
+	 * tos equation for IPA ver < 4.5 (as the field name reveals)
+	 * pure_ack equation for IPA ver >= 4.5
+	 * In both cases it needs one extra word.
+	 */
+	if (attrib->tos_eq_present)
+		num++;
+	if (attrib->protocol_eq_present)
+		num++;
+	if (attrib->tc_eq_present)
+		num++;
+	num += attrib->num_offset_meq_128;
+	num += attrib->num_offset_meq_32;
+	num += attrib->num_ihl_offset_meq_32;
+	num += attrib->num_ihl_offset_range_16;
+	if (attrib->ihl_offset_eq_32_present)
+		num++;
+	if (attrib->ihl_offset_eq_16_present)
+		num++;
+
+	IPAHAL_DBG_LOW("extra bytes number %d\n", num);
+
+	return num;
+}
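+
+/*
+ * Worked example (illustrative): a rule encoded as one protocol
+ * equation, two offset_meq_32 equations and one ihl_offset_range_16
+ * equation yields num = 1 + 2 + 1 = 4 extra-word bytes.
+ */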
+
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	uint8_t num_offset_meq_32 = attrib->num_offset_meq_32;
+	uint8_t num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+	uint8_t num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+	uint8_t num_offset_meq_128 = attrib->num_offset_meq_128;
+	int i;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/*
+	 * Only 3 of the 16 equations carry no extra-word parameter, so at
+	 * most 13 extra-word bytes can be required
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR_RL("too many extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		extra = NULL;
+		rest = *buf;
+	}
+
+	/*
+	 * tos_eq_present field has two meanings:
+	 * tos equation for IPA ver < 4.5 (as the field name reveals)
+	 * pure_ack equation for IPA ver >= 4.5
+	 * In both cases it needs one extra word.
+	 */
+	if (attrib->tos_eq_present) {
+		if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) {
+			extra = ipa_write_8(0, extra);
+		} else if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) {
+			extra = ipa_write_8(attrib->tos_eq, extra);
+		} else {
+			IPAHAL_ERR("no support for pure_ack and tos eqs\n");
+			return -EPERM;
+		}
+	}
+
+	if (attrib->protocol_eq_present)
+		extra = ipa_write_8(attrib->protocol_eq, extra);
+
+	if (attrib->tc_eq_present)
+		extra = ipa_write_8(attrib->tc_eq, extra);
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[0].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[1].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset,
+			extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset,
+			extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (attrib->metadata_meq32_present) {
+		rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+		rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset,
+			extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset,
+			extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+		rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+		rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+		rest = ipa_write_16(0, rest);
+	}
+
+	if (attrib->fl_eq_present)
+		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+	if (extra)
+		extra = ipa_pad_to_64(extra);
+	rest = ipa_pad_to_64(rest);
+	*buf = rest;
+
+	return 0;
+}
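+
+/*
+ * Resulting buffer layout (illustrative): with no extra bytes the 'rest'
+ * payload starts at *buf; with 1..IPA3_0_HW_TBL_HDR_WIDTH extra bytes one
+ * header-width extra-word block precedes it; with more, two blocks do.
+ * The extra region (when present) and the rest region are each padded to
+ * 64 bits, and *buf is advanced past the rest region on return.
+ */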
+
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	int i;
+
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+	/* LSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8);
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8);
+
+	/* MSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+			mac_addr_mask[i];
+
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+			mac_addr[i];
+}
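+
+/*
+ * Worked example (illustrative): for mac_addr = 00:11:22:33:44:55 the
+ * loop above stores value[15] = 0x00 ... value[10] = 0x55, i.e. the
+ * address is byte-reversed into the top six bytes of the 128-bit
+ * comparand while bytes 0..9 stay zero.
+ */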
+
+static int ipa_flt_generate_mac_eq(
+	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq128,
+	struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 offset = 0;
+	const uint8_t *mac_addr = NULL;
+	const uint8_t *mac_addr_mask = NULL;
+	int i;
+	uint32_t attrib_mask;
+
+	for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+		switch (i) {
+		case 0:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+			break;
+		case 1:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+			break;
+		case 2:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+			break;
+		case 3:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+			break;
+		case 4:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+			break;
+		case 5:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+			break;
+		default:
+			return -EPERM;
+		}
+
+		attrib_mask &= attrib->attrib_mask;
+		if (!attrib_mask)
+			continue;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[*ofst_meq128]);
+
+		ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+			&mac_addr, &mac_addr_mask);
+
+		ipa_flt_generate_mac_addr_eq(eq_atrb, offset,
+			mac_addr_mask, mac_addr,
+			*ofst_meq128);
+
+		(*ofst_meq128)++;
+	}
+
+	return 0;
+}
+
+static inline int ipa_flt_generat_vlan_eq(
+	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq32,
+	struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (attrib->attrib_mask & IPA_FLT_VLAN_ID) {
+		uint32_t vlan_tag;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, *ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[*ofst_meq32]);
+		/* -6 => offset of 802_1Q tag in L2 hdr */
+		eq_atrb->offset_meq_32[*ofst_meq32].offset = -6;
+		/* filter vlan packets: 0x8100 TPID + required VLAN ID */
+		vlan_tag = (0x8100 << 16) | (attrib->vlan_id & 0xFFF);
+		eq_atrb->offset_meq_32[*ofst_meq32].mask = 0xFFFF0FFF;
+		eq_atrb->offset_meq_32[*ofst_meq32].value = vlan_tag;
+		(*ofst_meq32)++;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+	bool tos_done = false;
+
+	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) {
+			IPAHAL_ERR("is_pure_ack eq not supported\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK);
+		/*
+		 * Starting with IPA 4.5, where the PURE ACK equation is
+		 * supported and TOS equation support was removed, the
+		 * tos_eq_present field represents pure_ack presence.
+		 */
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = 0;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ)) {
+			IPAHAL_DBG("tos eq not supported\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+			eq_atrb->tos_eq_present = 1;
+			eq_atrb->tos_eq = attrib->u.v4.tos;
+			/* mark TOS handled so the meq32 fallbacks skip it */
+			tos_done = true;
+		}
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v4.protocol;
+	}
+
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_flt_generate_mac_eq(attrib, en_rule,
+			&ofst_meq128, eq_atrb))
+			return -EPERM;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate the first ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			(attrib->dst_mac_addr_mask[3] & 0xFF) |
+			((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(attrib->dst_mac_addr[3] & 0xFF) |
+			((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+		/* populate the second ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+			((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+			((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 12  => offset of SYN after v4 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->tos_mask << 16;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->tos_value << 16;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.src_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.src_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_DBG("ran out of meq32 eq\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+				ipa3_0_ofst_meq32[ofst_meq32]);
+			/*
+			 * offset 0 => Take the first word.
+			 * offset of TOS in v4 header is 1
+			 */
+			eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				0xFF << 16;
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				attrib->u.v4.tos << 16;
+			ofst_meq32++;
+			tos_done = true;
+		}
+	}
+
+	if (ipa_flt_generat_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_DBG("ran out of ihl_meq32 eq\n");
+		} else {
+			*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+				ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+			/*
+			 * 0 => Take the first word. offset of TOS in
+			 * v4 header is 1. MSB bit asserted at IHL means
+			 * to ignore packet IHL and do offset inside IPA header
+			 */
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset =
+				0x80;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xFF << 16;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->u.v4.tos << 16;
+			ihl_ofst_meq32++;
+			tos_done = true;
+		}
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS && !tos_done) {
+		IPAHAL_ERR("could not find equation for tos\n");
+		return -EPERM;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
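+
+/*
+ * Pipeline sketch (illustrative): the eq_atrb filled here is the
+ * software form of the rule. Feeding it to
+ * ipa_fltrt_generate_hw_rule_bdy_from_eq() serializes the same
+ * equations, driven by the per-type counts and *_present flags set
+ * above, into the extra/rest byte layout, while rule_eq_bitmap records
+ * which equations the rule enables.
+ */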
+
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_IS_PURE_ACK) {
+		if (!IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK)) {
+			IPAHAL_ERR("is_pure_ack eq not supported\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK);
+		/*
+		 * Starting with IPA 4.5, where the PURE ACK equation is
+		 * supported and TOS equation support was removed, the
+		 * tos_eq_present field represents pure_ack presence.
+		 */
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = 0;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_TC_EQ);
+		eq_atrb->tc_eq_present = 1;
+		eq_atrb->tc_eq = attrib->u.v6.tc;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR_RL("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* use the same word order as in ipa v2 */
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.src_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.src_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.src_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.src_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.src_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.src_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.src_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.src_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR_RL("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+		/* use the same word order as in ipa v2 */
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.dst_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.dst_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.dst_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.dst_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.dst_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.dst_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.dst_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.dst_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR_RL("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->tos_mask << 20;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->tos_value << 20;
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_flt_generate_mac_eq(attrib, en_rule,
+			&ofst_meq128, eq_atrb))
+			return -EPERM;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+		/* populate the first ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			(attrib->dst_mac_addr_mask[3] & 0xFF) |
+			((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(attrib->dst_mac_addr[3] & 0xFF) |
+			((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+			((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+		/* populate the second ihl meq 32 eq */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+			((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+			((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+			((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 12  => offset of SYN after v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+			ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+
+		/* populate TCP protocol eq */
+		if (attrib->ether_type == 0x0800) {
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 30;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xFF0000;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				0x60000;
+		} else {
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 26;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xFF00;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				0x600;
+		}
+
+		/* populate TCP SYN eq */
+		if (attrib->ether_type == 0x0800) {
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 54;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0x20000;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				0x20000;
+		} else {
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 74;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0x20000;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				0x20000;
+		}
+		ihl_ofst_meq32 += 2;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of inner IP type after v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xF0000000;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(u32)attrib->type << 24;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR_RL("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (ipa_flt_generat_vlan_eq(attrib, en_rule, &ofst_meq32, eq_atrb))
+		return -EPERM;
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR_RL("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR_RL("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		if (attrib->ether_type == 0x0800) {
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
+				= 21;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= 0x0045;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= 0x0045;
+		} else {
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset =
+				20;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->ether_type;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->ether_type;
+		}
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		eq_atrb->fl_eq_present = 1;
+		eq_atrb->fl_eq = attrib->u.v6.flow_label;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
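For illustration, the two ihl_rng16 slots above bound how many range checks one rule can carry: requesting both port ranges fills them, and any further rng16-based attribute fails with -EPERM. A minimal caller sketch via the public wrapper defined further down (assumes the uapi ipa_rule_attrib layout used throughout this file):

	struct ipa_rule_attrib attrib = { 0 };
	struct ipa_ipfltri_rule_eq eq = { 0 };

	attrib.attrib_mask = IPA_FLT_SRC_PORT_RANGE | IPA_FLT_DST_PORT_RANGE;
	attrib.src_port_lo = 1024;
	attrib.src_port_hi = 2048;
	attrib.dst_port_lo = 80;
	attrib.dst_port_hi = 80;

	/* both ihl_ofst_rng16 slots get consumed; a third range-based
	 * attribute on top of this would return -EPERM
	 */
	if (ipahal_flt_generate_equation(IPA_IP_v6, &attrib, &eq))
		IPAHAL_ERR("equation generation failed\n");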
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+	struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+	u16 eq_bitmap;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+	int i;
+	u8 dummy_extra_wrd;
+
+	if (!addr || !atrb || !rule_size) {
+		IPAHAL_ERR("Input error: addr=%pK atrb=%pK rule_size=%pK\n",
+			addr, atrb, rule_size);
+		return -EINVAL;
+	}
+
+	eq_bitmap = atrb->rule_eq_bitmap;
+
+	IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap);
+
+	if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK) &&
+		(eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_PURE_ACK))) {
+		/*
+		 * The tos_eq_present field represents pure_ack when the
+		 * pure-ack equation is valid (starting with IPA 4.5). In
+		 * that case the tos equation is not supported.
+		 */
+		atrb->tos_eq_present = true;
+	}
+	if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ) &&
+		(eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))) {
+		atrb->tos_eq_present = true;
+	}
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+		atrb->protocol_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+		atrb->tc_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+		atrb->metadata_meq32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+		atrb->ihl_offset_eq_32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+		atrb->ihl_offset_eq_16_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+		atrb->fl_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+		atrb->ipv4_frag_eq_present = true;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+	/* only 3 of the 16 equations have no extra word param; the other 13
+	 * contribute one extra byte each, so more than 13 is impossible
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		dummy_extra_wrd = 0;
+		extra = &dummy_extra_wrd;
+		rest = addr + hdr_sz;
+	}
+	IPAHAL_DBG_LOW("addr=0x%pK extra=0x%pK rest=0x%pK\n",
+		addr, extra, rest);
+
+	if (IPA_IS_RULE_EQ_VALID(IPA_TOS_EQ) && atrb->tos_eq_present)
+		atrb->tos_eq = *extra++;
+	if (IPA_IS_RULE_EQ_VALID(IPA_IS_PURE_ACK) && atrb->tos_eq_present) {
+		atrb->tos_eq = 0;
+		extra++;
+	}
+	if (atrb->protocol_eq_present)
+		atrb->protocol_eq = *extra++;
+	if (atrb->tc_eq_present)
+		atrb->tc_eq = *extra++;
+
+	if (atrb->num_offset_meq_128 > 0) {
+		atrb->offset_meq_128[0].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+	}
+	if (atrb->num_offset_meq_128 > 1) {
+		atrb->offset_meq_128[1].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+	}
+
+	if (atrb->num_offset_meq_32 > 0) {
+		atrb->offset_meq_32[0].offset = *extra++;
+		atrb->offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_offset_meq_32 > 1) {
+		atrb->offset_meq_32[1].offset = *extra++;
+		atrb->offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_meq_32 > 0) {
+		atrb->ihl_offset_meq_32[0].offset = *extra++;
+		atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_ihl_offset_meq_32 > 1) {
+		atrb->ihl_offset_meq_32[1].offset = *extra++;
+		atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->metadata_meq32_present) {
+		atrb->metadata_meq32.mask = *((u32 *)rest);
+		rest += 4;
+		atrb->metadata_meq32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_range_16 > 0) {
+		atrb->ihl_offset_range_16[0].offset = *extra++;
+		atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+	if (atrb->num_ihl_offset_range_16 > 1) {
+		atrb->ihl_offset_range_16[1].offset = *extra++;
+		atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+
+	if (atrb->ihl_offset_eq_32_present) {
+		atrb->ihl_offset_eq_32.offset = *extra++;
+		atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->ihl_offset_eq_16_present) {
+		atrb->ihl_offset_eq_16.offset = *extra++;
+		atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->fl_eq_present) {
+		atrb->fl_eq = *((u32 *)rest);
+		atrb->fl_eq &= 0xfffff;
+		rest += 4;
+	}
+
+	IPAHAL_DBG_LOW("before rule alignment rest=0x%pK\n", rest);
+	rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+	IPAHAL_DBG_LOW("after rule alignment  rest=0x%pK\n", rest);
+
+	*rule_size = rest - addr;
+	IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size);
+
+	return 0;
+}
+
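To make the extra/rest split above concrete: assuming ipa_fltrt_calc_extra_wrd_bytes() counts one byte per equation that carries an extra parameter (as the 13-of-16 comment suggests), a rule with protocol_eq plus a single ihl_offset_range_16 needs two extra bytes, which fit in one 8-byte extra word:

	/*
	 * Hypothetical layout for such a rule (IPA3_0_HW_TBL_HDR_WIDTH = 8):
	 *
	 *   addr              rule header (hdr_sz bytes)
	 *   addr + hdr_sz     extra word: [protocol][rng16 offset][6 pad bytes]
	 *   addr + hdr_sz + 8 rest area:  [range_high u16][range_low u16]...
	 *
	 * extra_bytes = 2, so the "single extra word" branch is taken and
	 * rest starts one table-header width after extra.
	 */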
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+	atrb = &rule->eq_attrib;
+
+	IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+	if (rule_hdr->u.hdr.proc_ctx) {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+	}
+	rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_rt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa4_5_rt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_5_rt_rule_hw_hdr *)addr;
+	atrb = &rule->eq_attrib;
+
+	IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+	if (rule_hdr->u.hdr.proc_ctx) {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+	}
+	rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->cnt_idx = rule_hdr->u.hdr.stats_cnt_idx_lsb |
+		(rule_hdr->u.hdr.stats_cnt_idx_msb) << 6;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON_RATELIMIT_IPA(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa4_0_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON_RATELIMIT_IPA(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+	rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx;
+	rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule_ipav4_5(u8 *addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa4_5_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_5_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON_RATELIMIT_IPA(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+	rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx;
+	rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata;
+	rule->cnt_idx = rule_hdr->u.hdr.stats_cnt_idx_lsb |
+		(rule_hdr->u.hdr.stats_cnt_idx_msb) << 6;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ *  See ipahal_fltrt_objs[] comments
+ *
+ * Note: Global variables are zero-initialized, so any entry that was not
+ *  explicitly overridden remains zero; that is how such entries are found.
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_fltrt_obj zero_obj;
+	int i;
+	struct ipa_mem_buffer *mem;
+	int rc = -EFAULT;
+	u32 eq_bits;
+	u8 *eq_bitfield;
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("Invalid H/W type\n");
+		return -EFAULT;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_fltrt_obj))) {
+			memcpy(&ipahal_fltrt_objs[i+1],
+				&ipahal_fltrt_objs[i],
+				sizeof(struct ipahal_fltrt_obj));
+		} else {
+			/*
+			 * explicitly overridden FLT RT info
+			 * Check validity
+			 */
+			if (!ipahal_fltrt_objs[i+1].tbl_width) {
+				IPAHAL_ERR(
+				 "Zero tbl width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl sysaddr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl lcladdr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) {
+				IPAHAL_ERR(
+				  "No blk sz alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_start_alignment) {
+				IPAHAL_ERR(
+				  "No rule start alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) {
+				IPAHAL_ERR(
+				 "Zero tbl hdr width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) {
+				IPAHAL_ERR(
+				 "Zero tbl addr mask ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) {
+				IPAHAL_ERR(
+				 "Too little bits for rule_id ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_buf_size) {
+				IPAHAL_ERR(
+				 "zero rule buf size ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) {
+				IPAHAL_ERR(
+				  "No write_val_to_hdr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) {
+				IPAHAL_ERR(
+				  "No create_flt_bitmap CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_tbl_addr) {
+				IPAHAL_ERR(
+				  "No create_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) {
+				IPAHAL_ERR(
+				  "No parse_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_eq) {
+				IPAHAL_ERR(
+				  "No flt_generate_eq CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	eq_bits = 0;
+	eq_bitfield = ipahal_fltrt_objs[ipa_hw_type].eq_bitfield;
+	for (i = 0; i < IPA_EQ_MAX; i++) {
+		if (!IPA_IS_RULE_EQ_VALID(i))
+			continue;
+
+		if (eq_bits & IPA_GET_RULE_EQ_BIT_PTRN(eq_bitfield[i])) {
+			IPAHAL_ERR("more than one eq with the same bit. eq=%d\n", i);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		eq_bits |= IPA_GET_RULE_EQ_BIT_PTRN(eq_bitfield[i]);
+	}
+
+	mem = &ipahal_ctx->empty_fltrt_tbl;
+
+	/* Set up an empty table in system memory; this will
+	 * be used, for example, to delete an rt tbl safely
+	 */
+	mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+			mem->size);
+		return -ENOMEM;
+	}
+
+	if (mem->phys_base &
+		ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+		IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+			&mem->phys_base);
+		rc = -EFAULT;
+		goto clear_empty_tbl;
+	}
+
+	memset(mem->base, 0, mem->size);
+	IPAHAL_DBG("empty table allocated in system memory\n");
+
+	return 0;
+
+clear_empty_tbl:
+	dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+		mem->phys_base);
+	return rc;
+}
+
+void ipahal_fltrt_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base)
+		dma_free_coherent(ipahal_ctx->ipa_pdev,
+			ipahal_ctx->empty_fltrt_tbl.size,
+			ipahal_ctx->empty_fltrt_tbl.base,
+			ipahal_ctx->empty_fltrt_tbl.phys_base);
+}
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects.
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment;
+}
+
+/*
+ * Rule priority is used to determine rule ordering in the integrated
+ * table, which consists of hashable and non-hashable parts. Once IPA
+ * scans a rule of max priority, it uses that rule and does not look
+ * any further.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio;
+}
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!prio) {
+		IPAHAL_ERR("Invalid Input\n");
+		return -EINVAL;
+	}
+
+	/* Priority logic is reversed: priority 0 is considered the max priority */
+	if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+		IPAHAL_ERR("Invalid given priority %d\n", *prio);
+		return -EINVAL;
+	}
+
+	*prio += 1;
+
+	if (*prio > obj->rule_min_prio) {
+		IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+		*prio -= 1;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
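The reversed scale is easiest to see with the IPA3.0 bounds from ipahal_fltrt_i.h (rule_max_prio = 0, rule_min_prio = 1023); a short usage sketch:

	int prio = ipahal_get_rule_max_priority();	/* 0 = strongest */

	/* move one step down the scale; fails only at 1023 */
	if (ipahal_rule_decrease_priority(&prio))
		IPAHAL_ERR("already at min priority\n");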
+/* Does the given ID represent a rule miss?
+ * The rule miss ID is always the max ID possible in the bit-pattern
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+	return (id ==
+		((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len)
+		-1));
+}
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+	return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1);
+}
+
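With the 10-bit rule IDs of IPA3.0 (IPA3_0_RULE_ID_BIT_LEN in ipahal_fltrt_i.h), the two helpers above work out to:

	ipahal_is_rule_miss_id(1023);	/* true: (1 << 10) - 1 is the miss ID */
	ipahal_get_rule_id_hi_bit();	/* 0x200: BIT(9) splits the ID space */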
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void)
+{
+	return  ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id;
+}
+
+/*
+ * Is the given counter id valid
+ */
+bool ipahal_is_rule_cnt_id_valid(u8 cnt_id)
+{
+	/* cnt_id is unsigned, so only the upper bound needs checking */
+	if (cnt_id > IPA_FLT_RT_HW_COUNTER)
+		return false;
+	return true;
+}
+
+
+/*
+ * low value possible for counter hdl id
+ */
+u32 ipahal_get_low_hdl_id(void)
+{
+	return IPA4_5_LOW_CNT_ID;
+}
+
+/*
+ * max counter hdl id for stats
+ */
+u32 ipahal_get_high_hdl_id(void)
+{
+	return IPA_MAX_FLT_RT_CNT_INDEX;
+}
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic)
+{
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+	int flag;
+
+	IPAHAL_DBG("Entry\n");
+
+	flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+		IPAHAL_ERR("Not enough space at non-hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (obj->support_hash &&
+		(hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+		IPAHAL_ERR("Not enough space at hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, flag);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < tbls_num; i++)
+		obj->write_val_to_hdr(addr,
+			mem->base + i * obj->tbl_hdr_width);
+
+	return 0;
+}
+
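For example, with the IPA3.0 header width of 8 bytes, four tables produce a 32-byte DMA header whose four slots all hold the address of the shared empty DDR table; a hedged caller sketch (the SRAM block sizes are illustrative):

	struct ipa_mem_buffer hdr;

	if (!ipahal_rt_generate_empty_img(4, 64, 64, &hdr, false)) {
		/* hdr.size == 4 * 8; every entry -> empty_fltrt_tbl */
		ipahal_free_dma_mem(&hdr);
	}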
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ *  If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+	bool atomic)
+{
+	int flt_spc;
+	u64 flt_bitmap;
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+	int flag;
+
+	IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+	flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%pK\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (obj->support_hash) {
+		flt_spc = hash_hdr_size;
+		/* bitmap word */
+		if (ep_bitmap)
+			flt_spc -= obj->tbl_hdr_width;
+		flt_spc /= obj->tbl_hdr_width;
+		if (tbls_num > flt_spc)  {
+			IPAHAL_ERR("space for hash flt hdr is too small\n");
+			WARN_ON(1);
+			return -EPERM;
+		}
+	}
+
+	flt_spc = nhash_hdr_size;
+	/* bitmap word */
+	if (ep_bitmap)
+		flt_spc -= obj->tbl_hdr_width;
+	flt_spc /= obj->tbl_hdr_width;
+	if (tbls_num > flt_spc)  {
+		IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	if (ep_bitmap)
+		mem->size += obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, flag);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	if (ep_bitmap) {
+		flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+		IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+		obj->write_val_to_hdr(flt_bitmap, mem->base);
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+	if (ep_bitmap) {
+		for (i = 1; i <= tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	} else {
+		for (i = 0; i < tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+}
+
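The filter variant differs only in the optional leading bitmap word: with a non-zero ep_bitmap the header grows by one tbl_hdr_width and the per-table entries start at index 1, e.g.:

	struct ipa_mem_buffer hdr;

	/* EPs 0 and 2 own filter tables */
	if (!ipahal_flt_generate_empty_img(2, 64, 64, BIT(0) | BIT(2),
		&hdr, false)) {
		/* hdr.size == (1 + 2) * 8: bitmap word, then two entries */
		ipahal_free_dma_mem(&hdr);
	}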
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ *  flt/rt tables headers to be filled into sram. Init each table to point
+ *  to empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	u64 addr;
+	int i;
+	struct ipahal_fltrt_obj *obj;
+	gfp_t flag = GFP_KERNEL;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!params) {
+		IPAHAL_ERR_RL("Input error: params=%pK\n", params);
+		return -EINVAL;
+	}
+
+	params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+alloc:
+	params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+		params->nhash_hdr.size,
+		&params->nhash_hdr.phys_base, flag);
+	if (!params->nhash_hdr.base) {
+		if (flag == GFP_KERNEL) {
+			flag = GFP_ATOMIC;
+			goto alloc;
+		}
+		IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
+			params->nhash_hdr.size);
+		goto nhash_alloc_fail;
+	}
+
+	if (obj->support_hash) {
+		params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+		params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+			params->hash_hdr.size, &params->hash_hdr.phys_base,
+			GFP_KERNEL);
+		if (!params->hash_hdr.base) {
+			IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
+				params->hash_hdr.size);
+			goto hash_alloc_fail;
+		}
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < params->tbls_num; i++) {
+		obj->write_val_to_hdr(addr,
+			params->nhash_hdr.base + i * obj->tbl_hdr_width);
+		if (obj->support_hash)
+			obj->write_val_to_hdr(addr,
+				params->hash_hdr.base +
+				i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+
+hash_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+	return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ *  local flt/rt tables bodies to be filled into sram
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	struct ipahal_fltrt_obj *obj;
+	gfp_t flag = GFP_KERNEL;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* The HAL allocates larger sizes than the given effective ones
+	 * for alignments and border indications
+	 */
+	IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+		params->total_sz_lcl_hash_tbls,
+		params->total_sz_lcl_nhash_tbls);
+
+	IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n",
+		params->num_lcl_hash_tbls,
+		params->num_lcl_nhash_tbls);
+
+	/* Pad the sizes to account for the termination word
+	 *  and the H/W local table start offset alignment
+	 */
+	if (params->nhash_bdy.size) {
+		params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+		/* for table terminator */
+		params->nhash_bdy.size += obj->tbl_width *
+			params->num_lcl_nhash_tbls;
+		/* align the start of local rule-set */
+		params->nhash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_nhash_tbls;
+		/* SRAM block size alignment */
+		params->nhash_bdy.size += obj->blk_sz_alignment;
+		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
+			params->nhash_bdy.size);
+
+alloc1:
+		params->nhash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+			&params->nhash_bdy.phys_base, flag);
+		if (!params->nhash_bdy.base) {
+			if (flag == GFP_KERNEL) {
+				flag = GFP_ATOMIC;
+				goto alloc1;
+			}
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->nhash_bdy.size);
+			return -ENOMEM;
+		}
+	}
+
+	if (!obj->support_hash && params->hash_bdy.size) {
+		IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+		WARN_ON(1);
+	}
+
+	if (obj->support_hash && params->hash_bdy.size) {
+		params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+		/* for table terminator */
+		params->hash_bdy.size += obj->tbl_width *
+			params->num_lcl_hash_tbls;
+		/* align the start of local rule-set */
+		params->hash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_hash_tbls;
+		/* SRAM block size alignment */
+		params->hash_bdy.size += obj->blk_sz_alignment;
+		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
+			params->hash_bdy.size);
+
+alloc2:
+		params->hash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+			&params->hash_bdy.phys_base, flag);
+		if (!params->hash_bdy.base) {
+			if (flag == GFP_KERNEL) {
+				flag = GFP_ATOMIC;
+				goto alloc2;
+			}
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_bdy.size);
+			goto hash_bdy_fail;
+		}
+	}
+
+	return 0;
+
+hash_bdy_fail:
+	if (params->nhash_bdy.size)
+		ipahal_free_dma_mem(&params->nhash_bdy);
+
+	return -ENOMEM;
+}
+
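As a worked example of the sizing above, using the IPA3.0 constants from ipahal_fltrt_i.h (tbl_width = 8, lcladdr_alignment = 7, blk_sz_alignment = 127): two local non-hash tables with 100 effective bytes in total need 100 + 2*8 (terminators) + 2*7 (rule-set start alignment) = 130 bytes, and the final +127 / &~127 step rounds that up to a 256-byte DMA buffer.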
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Used usually during commit.
+ *  Allocates header structures and init them to point to empty DDR table
+ *  Allocate body strucutres for local bodies tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	/* Input validation */
+	if (!params) {
+		IPAHAL_ERR_RL("Input err: no params\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+		IPAHAL_ERR_RL("fail to alloc and init tbl hdr\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_fltrt_alloc_lcl_bdy(params)) {
+		IPAHAL_ERR_RL("fail to alloc tbl bodies\n");
+		goto bdy_alloc_fail;
+	}
+
+	return 0;
+
+bdy_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+	if (params->hash_hdr.size)
+		ipahal_free_dma_mem(&params->hash_hdr);
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. IN: size holds the effective table size.
+ *  OUT: base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+	struct ipahal_fltrt_obj *obj;
+	gfp_t flag = GFP_KERNEL;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!tbl_mem) {
+		IPAHAL_ERR("Input err\n");
+		return -EINVAL;
+	}
+
+	if (!tbl_mem->size) {
+		IPAHAL_ERR("Input err: zero table size\n");
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* add word for rule-set terminator */
+	tbl_mem->size += obj->tbl_width;
+alloc:
+	tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+		&tbl_mem->phys_base, flag);
+	if (!tbl_mem->base) {
+		if (flag == GFP_KERNEL) {
+			flag = GFP_ATOMIC;
+			goto alloc;
+		}
+		IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+			tbl_mem->size);
+		return -ENOMEM;
+	}
+	if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+		IPAHAL_ERR("sys rt tbl address is not aligned\n");
+		goto align_err;
+	}
+
+	memset(tbl_mem->base, 0, tbl_mem->size);
+
+	return 0;
+
+align_err:
+	ipahal_free_dma_mem(tbl_mem);
+	return -EPERM;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base) {
+		IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%pK\n",
+			addr, hdr_base);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = obj->create_tbl_addr(is_sys, addr);
+	obj->write_val_to_hdr(hwaddr, hdr);
+
+	return 0;
+}
+
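The reader below is the inverse of this writer, so the two form a round trip over a header entry; a minimal sketch (hdr is a hypothetical ipa_mem_buffer holding a header allocated by the helpers above):

	u64 addr;
	bool is_sys;

	/* point entry 3 at a system table at physical address 0x1000 */
	ipahal_fltrt_write_addr_to_hdr(0x1000, hdr.base, 3, true);

	/* parse it back: addr == 0x1000, is_sys == true */
	ipahal_fltrt_read_addr_from_hdr(hdr.base, 3, &addr, &is_sys);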
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given an sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base || !is_sys) {
+		IPAHAL_ERR("Input err: addr=%pK hdr_base=%pK is_sys=%pK\n",
+			addr, hdr_base, is_sys);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = *((u64 *)hdr);
+	obj->parse_tbl_addr(hwaddr, addr, is_sys);
+	return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n",
+			params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+		buf = tmp;
+	} else {
+		if ((long)buf & obj->rule_start_alignment) {
+			IPAHAL_ERR("buff is not rule start aligned\n");
+			return -EPERM;
+		}
+	}
+
+	rc = obj->rt_generate_hw_rule(params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
+
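A common pattern hinted at by the NULL-buf path above is a two-pass commit: query the rule size first, then build the rule in the real table buffer. A sketch, where tbl_base/tbl_ofst are hypothetical placeholders for the caller's table memory:

	struct ipahal_rt_rule_gen_params params = { 0 };
	u32 hw_len;

	/* ... fill params: ipt, dst_pipe_idx, hdr_type/ofst, rule ... */

	/* pass 1: NULL buf builds the rule in a temp buffer, size only */
	if (ipahal_rt_generate_hw_rule(&params, &hw_len, NULL))
		return;

	/* pass 2: emit the rule at an aligned slot inside the table */
	ipahal_rt_generate_hw_rule(&params, &hw_len, tbl_base + tbl_ofst);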
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%pK hw_len=%pK\n",
+			params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = tmp;
+	} else {
+		if ((long)buf & obj->rule_start_alignment) {
+			IPAHAL_ERR("buff is not rule start aligned\n");
+			return -EPERM;
+		}
+	}
+
+	rc = obj->flt_generate_hw_rule(params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", ipt);
+		return -EINVAL;
+	}
+
+	if (!attrib || !eq_atrb) {
+		IPAHAL_ERR_RL("Input err: attrib=%pK eq_atrb=%pK\n",
+			attrib, eq_atrb);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt,
+		attrib, eq_atrb);
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule(
+		rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%pK rule=%pK\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule(
+		rule_addr, rule);
+}
+

+ 308 - 0
ipa/ipa_v3/ipahal/ipahal_fltrt.h

@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ *  The allocation logic will allocate DMA memory representing the header.
+ *  If the bodies are local (SRAM), the allocation will also allocate
+ *  DMA buffers to hold the raw content of these local tables
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+	enum ipa_ip_type ipt;
+	u32 tbls_num;
+	u32 num_lcl_hash_tbls;
+	u32 num_lcl_nhash_tbls;
+	u32 total_sz_lcl_hash_tbls;
+	u32 total_sz_lcl_nhash_tbls;
+
+	/* OUT PARAMS */
+	struct ipa_mem_buffer hash_hdr;
+	struct ipa_mem_buffer nhash_hdr;
+	struct ipa_mem_buffer hash_bdy;
+	struct ipa_mem_buffer nhash_bdy;
+};
+
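A hedged sketch of a caller filling the IN half before calling ipahal_fltrt_allocate_hw_tbl_imgs() (counts and sizes are illustrative; a non-zero nhash_bdy.size is how the allocation logic in the .c file is asked to allocate a body buffer):

	struct ipahal_fltrt_alloc_imgs_params params = { 0 };

	params.ipt = IPA_IP_v4;
	params.tbls_num = 4;		/* entries per header structure */
	params.num_lcl_nhash_tbls = 2;	/* two bodies live in SRAM */
	params.total_sz_lcl_nhash_tbls = 100;
	params.nhash_bdy.size = 1;	/* non-zero: request a body buffer */

	if (!ipahal_fltrt_allocate_hw_tbl_imgs(&params))
		/* nhash_hdr/hash_hdr (and nhash_bdy) are now DMA-backed */;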
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+	IPAHAL_RT_RULE_HDR_NONE,
+	IPAHAL_RT_RULE_HDR_RAW,
+	IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the header in a local or a system table?
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @cnt_idx: Stats counter index
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	int dst_pipe_idx;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	u32 priority;
+	u32 id;
+	u8 cnt_idx;
+	const struct ipa_rt_rule_i *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in sram or system mem?
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: whether to retain the removed header in header removal
+ * @id: Rule ID
+ * @cnt_idx: stats counter index
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+	int dst_pipe_idx;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	u32 priority;
+	bool retain_hdr;
+	u32 id;
+	u8 cnt_idx;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Routing table the rule points to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @cnt_idx: Stats counter index
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	u32 rt_tbl_idx;
+	u32 priority;
+	u32 id;
+	u8 cnt_idx;
+	const struct ipa_flt_rule_i *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @cnt_idx: stats counter index
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+	struct ipa_flt_rule_i rule;
+	u32 priority;
+	u32 id;
+	u8 cnt_idx;
+	u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment.
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects.
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to determine rule ordering in the integrated
+ * table, which consists of hashable and non-hashable parts. Once IPA
+ * scans a rule of max priority, it uses that rule and does not look
+ * any further.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represent a rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * low value possible for counter hdl id
+ */
+u32 ipahal_get_low_hdl_id(void);
+
+/*
+ * max counter hdl id for stats
+ */
+u32 ipahal_get_high_hdl_id(void);
+
+/* Is the given stats counter id valid for rt/flt rules? */
+bool ipahal_is_rule_cnt_id_valid(u8 cnt_id);
+
+/* max rule id for stats */
+bool ipahal_get_max_stats_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables; each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+	bool atomic);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Usually used during commit.
+ *  Allocates header structures and inits them to point to the empty DDR table
+ *  Allocates body structures for the local body tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. IN: size holds the effective table size.
+ *  OUT: base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given an sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule);
+
+
+#endif /* _IPAHAL_FLTRT_H_ */

+ 257 - 0
ipa/ipa_v3/ipahal/ipahal_fltrt_i.h

@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ *  These are named values for the equations that can be used.
+ *  The HAL layer holds the mapping between these names and the
+ *  H/W representation.
+ */
+enum ipa_fltrt_equations {
+	IPA_TOS_EQ,
+	IPA_PROTOCOL_EQ,
+	IPA_TC_EQ,
+	IPA_OFFSET_MEQ128_0,
+	IPA_OFFSET_MEQ128_1,
+	IPA_OFFSET_MEQ32_0,
+	IPA_OFFSET_MEQ32_1,
+	IPA_IHL_OFFSET_MEQ32_0,
+	IPA_IHL_OFFSET_MEQ32_1,
+	IPA_METADATA_COMPARE,
+	IPA_IHL_OFFSET_RANGE16_0,
+	IPA_IHL_OFFSET_RANGE16_1,
+	IPA_IHL_OFFSET_EQ_32,
+	IPA_IHL_OFFSET_EQ_16,
+	IPA_FL_EQ,
+	IPA_IS_FRAG,
+	IPA_IS_PURE_ACK,
+	IPA_EQ_MAX,
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific to each IPA version.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+
+
+/*
+ * Rules Priority.
+ * Needed because rules are classified into hashable and non-hashable.
+ * Higher priority means a lower number, i.e. 0 is the highest priority
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/*
+ * COUNTER ID, LOW COUNTER ID.
+ */
+#define IPA4_5_LOW_CNT_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in lcl or sys memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 rsvd1:5;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 rsvd2:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa4_5_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: whether the referenced header resides in local or system memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to preserve rules' order
+ *  in the integrated table consisting of hashable and
+ *  non-hashable parts
+ * @stats_cnt_idx_msb: stats cnt index msb
+ * @rsvd2: reserved bits
+ * @retain_hdr: instructs the H/W to add back to the packet the header
+ *  removed as part of header removal. This is done by the
+ *  header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @stats_cnt_idx_lsb: stats cnt index lsb
+ */
+struct ipa4_5_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 stats_cnt_idx_msb : 2;
+			u64 rsvd2 : 3;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 stats_cnt_idx_lsb : 6;
+		} hdr;
+	} u;
+};
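+
+/*
+ * Illustrative sketch: the 8-bit stats counter index is split across the
+ * rule header (here and in the IPA4.5 flt header below), so packing a
+ * hypothetical index 'cnt' per the bitfield widths would look like:
+ *
+ *	hdr.u.hdr.stats_cnt_idx_lsb = cnt & 0x3f;
+ *	hdr.u.hdr.stats_cnt_idx_msb = (cnt >> 6) & 0x3;
+ */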
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: instructs the H/W to add back to the packet the header
+ *  removed as part of header removal. This is done by the
+ *  header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to preserve rules' order
+ *  in the integrated table consisting of hashable and
+ *  non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 action:5;
+			u64 rt_tbl_idx:5;
+			u64 retain_hdr:1;
+			u64 rsvd1:5;
+			u64 priority:10;
+			u64 rsvd2:6;
+			u64 rule_id:10;
+			u64 rsvd3:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa4_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: instructs the H/W to add back to the packet the header
+ *  removed as part of header removal. This is done by the
+ *  header insertion block.
+ * @pdn_idx: for the go-to-src-NAT action, the PDN index to pass to
+ *  the NAT block
+ * @set_metadata: enable metadata replacement in the NAT block
+ * @priority: Rule priority. Added to preserve rules' order
+ *  in the integrated table consisting of hashable and
+ *  non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa4_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule : 16;
+			u64 action : 5;
+			u64 rt_tbl_idx : 5;
+			u64 retain_hdr : 1;
+			u64 pdn_idx : 4;
+			u64 set_metadata : 1;
+			u64 priority : 10;
+			u64 rsvd2 : 6;
+			u64 rule_id : 10;
+			u64 rsvd3 : 6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa4_5_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: instructs the H/W to add back to the packet the header
+ *  removed as part of header removal. This is done by the
+ *  header insertion block.
+ * @pdn_idx: for the go-to-src-NAT action, the PDN index to pass to
+ *  the NAT block
+ * @set_metadata: enable metadata replacement in the NAT block
+ * @priority: Rule priority. Added to preserve rules' order
+ *  in the integrated table consisting of hashable and
+ *  non-hashable parts
+ * @stats_cnt_idx_msb: stats cnt index msb
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @stats_cnt_idx_lsb: stats cnt index lsb
+ */
+struct ipa4_5_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule : 16;
+			u64 action : 5;
+			u64 rt_tbl_idx : 5;
+			u64 retain_hdr : 1;
+			u64 pdn_idx : 4;
+			u64 set_metadata : 1;
+			u64 priority : 10;
+			u64 stats_cnt_idx_msb : 2;
+			u64 rsvd2 : 4;
+			u64 rule_id : 10;
+			u64 stats_cnt_idx_lsb : 6;
+		} hdr;
+	} u;
+};
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */

+ 634 - 0
ipa/ipa_v3/ipahal/ipahal_hw_stats.c

@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipahal.h"
+#include "ipahal_hw_stats.h"
+#include "ipahal_hw_stats_i.h"
+#include "ipahal_i.h"
+
+struct ipahal_hw_stats_obj {
+	struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params,
+		bool is_atomic_ctx);
+	int (*get_offset)(void *params, struct ipahal_stats_offset *out);
+	int (*parse_stats)(void *init_params, void *raw_stats,
+		void *parsed_stats);
+};
+
+static int _count_ones(u32 number)
+{
+	int count = 0;
+
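+	/* Kernighan's method: number & (number - 1) clears the lowest set bit */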
+	while (number) {
+		count++;
+		number = number & (number - 1);
+	}
+
+	return count;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_quota *in =
+		(struct ipahal_stats_init_quota *)params;
+	int entries = _count_ones(in->enabled_bitmask);
+
+	IPAHAL_DBG_LOW("entries = %d\n", entries);
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = entries * sizeof(struct ipahal_stats_quota_hw);
+	return pyld;
+}
+
+static int ipahal_get_offset_quota(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_quota *in =
+		(struct ipahal_stats_get_offset_quota *)params;
+	int entries = _count_ones(in->init.enabled_bitmask);
+
+	IPAHAL_DBG_LOW("\n");
+	out->offset = 0;
+	out->size = entries * sizeof(struct ipahal_stats_quota_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_quota(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_quota *init =
+		(struct ipahal_stats_init_quota *)init_params;
+	struct ipahal_stats_quota_hw *raw_hw =
+		(struct ipahal_stats_quota_hw *)raw_stats;
+	struct ipahal_stats_quota_all *out =
+		(struct ipahal_stats_quota_all *)parsed_stats;
+	int stat_idx = 0;
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		if (init->enabled_bitmask & (1 << i)) {
+			IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx);
+			out->stats[i].num_ipv4_bytes =
+				raw_hw[stat_idx].num_ipv4_bytes;
+			out->stats[i].num_ipv4_pkts =
+				raw_hw[stat_idx].num_ipv4_pkts;
+			out->stats[i].num_ipv6_pkts =
+				raw_hw[stat_idx].num_ipv6_pkts;
+			out->stats[i].num_ipv6_bytes =
+				raw_hw[stat_idx].num_ipv6_bytes;
+			stat_idx++;
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_tethering *in =
+		(struct ipahal_stats_init_tethering *)params;
+	int hdr_entries = _count_ones(in->prod_bitmask);
+	int entries = 0;
+	int i;
+	void *pyld_ptr;
+	u32 incremental_offset;
+
+	IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries);
+	for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+		if (in->prod_bitmask & (1 << i)) {
+			if (in->cons_bitmask[i] == 0) {
+				IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+				return NULL;
+			}
+			entries += _count_ones(in->cons_bitmask[i]);
+		}
+	}
+	IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+		entries * sizeof(struct ipahal_stats_tethering_hw),
+		is_atomic_ctx);
+	if (!pyld)
+		return NULL;
+
+	pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+		entries * sizeof(struct ipahal_stats_tethering_hw);
+
+	pyld_ptr = pyld->data;
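+	/* H/W header offsets are expressed in 8-byte units, hence the / 8 */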
+	incremental_offset =
+		(hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw))
+			/ 8;
+	for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+		if (in->prod_bitmask & (1 << i)) {
+			struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr;
+
+			hdr->dst_mask = in->cons_bitmask[i];
+			hdr->offset = incremental_offset;
+			IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask);
+			IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset);
+			/* add the stats entry */
+			incremental_offset += _count_ones(in->cons_bitmask[i]) *
+				sizeof(struct ipahal_stats_tethering_hw) / 8;
+			pyld_ptr += sizeof(*hdr);
+		}
+	}
+
+	return pyld;
+}
+
+static int ipahal_get_offset_tethering(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_tethering *in =
+		(struct ipahal_stats_get_offset_tethering *)params;
+	int entries = 0;
+	int i;
+
+	for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) {
+		if (in->init.prod_bitmask & (1 << i)) {
+			if (in->init.cons_bitmask[i] == 0) {
+				IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+				return -EPERM;
+			}
+			entries += _count_ones(in->init.cons_bitmask[i]);
+		}
+	}
+	IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+	/* skip the header */
+	out->offset = _count_ones(in->init.prod_bitmask) *
+		sizeof(struct ipahal_stats_tethering_hdr_hw);
+	out->size = entries * sizeof(struct ipahal_stats_tethering_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_tethering *init =
+		(struct ipahal_stats_init_tethering *)init_params;
+	struct ipahal_stats_tethering_hw *raw_hw =
+		(struct ipahal_stats_tethering_hw *)raw_stats;
+	struct ipahal_stats_tethering_all *out =
+		(struct ipahal_stats_tethering_all *)parsed_stats;
+	int i, j;
+	int stat_idx = 0;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		for (j = 0; j < IPAHAL_MAX_PIPES; j++) {
+			if ((init->prod_bitmask & (1 << i)) &&
+			    init->cons_bitmask[i] & (1 << j)) {
+				IPAHAL_DBG_LOW("prod %d cons %d\n", i, j);
+				IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx);
+				out->stats[i][j].num_ipv4_bytes =
+					raw_hw[stat_idx].num_ipv4_bytes;
+				IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n",
+					out->stats[i][j].num_ipv4_bytes);
+				out->stats[i][j].num_ipv4_pkts =
+					raw_hw[stat_idx].num_ipv4_pkts;
+				IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n",
+					out->stats[i][j].num_ipv4_pkts);
+				out->stats[i][j].num_ipv6_pkts =
+					raw_hw[stat_idx].num_ipv6_pkts;
+				IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n",
+					out->stats[i][j].num_ipv6_pkts);
+				out->stats[i][j].num_ipv6_bytes =
+					raw_hw[stat_idx].num_ipv6_bytes;
+				IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n",
+					out->stats[i][j].num_ipv6_bytes);
+				stat_idx++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt_v4_5(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	int num = (int)(uintptr_t)params;
+
+	if (num > IPA_MAX_FLT_RT_CNT_INDEX ||
+		num <= 0) {
+		IPAHAL_ERR("num %d not valid\n", num);
+		return NULL;
+	}
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		num *
+		sizeof(struct ipahal_stats_flt_rt_v4_5_hw),
+		is_atomic_ctx);
+	if (!pyld)
+		return NULL;
+	pyld->len = num *
+		sizeof(struct ipahal_stats_flt_rt_v4_5_hw);
+	return pyld;
+}
+
+static int ipahal_get_offset_flt_rt_v4_5(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_flt_rt_v4_5 *in =
+		(struct ipahal_stats_get_offset_flt_rt_v4_5 *)params;
+	int num;
+
+	out->offset = (in->start_id - 1) *
+		sizeof(struct ipahal_stats_flt_rt_v4_5);
+	num = in->end_id - in->start_id + 1;
+	out->size = num * sizeof(struct ipahal_stats_flt_rt_v4_5);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_flt_rt_v4_5(void *init_params,
+	void *raw_stats, void *parsed_stats)
+{
+	struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
+	struct ipa_ioc_flt_rt_query *query =
+		(struct ipa_ioc_flt_rt_query *)parsed_stats;
+	int num, i;
+
+	num = query->end_id - query->start_id + 1;
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < num; i++) {
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_bytes =
+			raw_hw[i].num_bytes;
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_pkts_hash =
+			raw_hw[i].num_packets_hash;
+		((struct ipa_flt_rt_stats *)
+		query->stats)[i].num_pkts =
+			raw_hw[i].num_packets;
+	}
+
+	return 0;
+}
+
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_flt_rt *in =
+		(struct ipahal_stats_init_flt_rt *)params;
+	int hdr_entries;
+	int num_rules = 0;
+	int i, start_entry;
+	void *pyld_ptr;
+	u32 incremental_offset;
+
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
+		num_rules += _count_ones(in->rule_id_bitmask[i]);
+
+	if (num_rules == 0) {
+		IPAHAL_ERR("no rule ids provided\n");
+		return NULL;
+	}
+	IPAHAL_DBG_LOW("num_rules = %d\n", num_rules);
+
+	hdr_entries = IPAHAL_MAX_RULE_ID_32;
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+		if (in->rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	start_entry = i;
+
+	for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) {
+		if (in->rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+		num_rules * sizeof(struct ipahal_stats_flt_rt_hw),
+		is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+		num_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+
+	pyld_ptr = pyld->data;
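+	/* H/W counter offsets are expressed in 8-byte units, hence the / 8 */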
+	incremental_offset =
+		(hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw))
+			/ 8;
+	for (i = start_entry; i < hdr_entries; i++) {
+		struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr;
+
+		hdr->en_mask = in->rule_id_bitmask[i];
+		hdr->cnt_offset = incremental_offset;
+		/* add the stats entry */
+		incremental_offset += _count_ones(in->rule_id_bitmask[i]) *
+			sizeof(struct ipahal_stats_flt_rt_hw) / 8;
+		pyld_ptr += sizeof(*hdr);
+	}
+
+	return pyld;
+}
+
+static int ipahal_get_offset_flt_rt(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_flt_rt *in =
+		(struct ipahal_stats_get_offset_flt_rt *)params;
+	int i;
+	int hdr_entries;
+	int skip_rules = 0;
+	int start_entry;
+	int rule_bit = in->rule_id % 32;
+	int rule_idx = in->rule_id / 32;
+
+	if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
+		IPAHAL_ERR("invalid rule_id %d\n", in->rule_id);
+		return -EPERM;
+	}
+
+	hdr_entries = IPAHAL_MAX_RULE_ID_32;
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+		if (in->init.rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+
+	if (hdr_entries == 0) {
+		IPAHAL_ERR("no rule ids provided\n");
+		return -EPERM;
+	}
+	start_entry = i;
+
+	for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) {
+		if (in->init.rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+	/* skip the header */
+	out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw);
+
+	/* skip the previous rules  */
+	for (i = start_entry; i < rule_idx; i++)
+		skip_rules += _count_ones(in->init.rule_id_bitmask[i]);
+
+	for (i = 0; i < rule_bit; i++)
+		if (in->init.rule_id_bitmask[rule_idx] & (1 << i))
+			skip_rules++;
+
+	out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+	out->size = sizeof(struct ipahal_stats_flt_rt_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_flt_rt_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_hw *)raw_stats;
+	struct ipahal_stats_flt_rt *out =
+		(struct ipahal_stats_flt_rt *)parsed_stats;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	out->num_packets = raw_hw->num_packets;
+	out->num_packets_hash = raw_hw->num_packets_hash;
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_drop *in =
+		(struct ipahal_stats_init_drop *)params;
+	int entries = _count_ones(in->enabled_bitmask);
+
+	IPAHAL_DBG_LOW("entries = %d\n", entries);
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx);
+	if (!pyld)
+		return NULL;
+
+	pyld->len = entries * sizeof(struct ipahal_stats_drop_hw);
+
+	return pyld;
+}
+
+static int ipahal_get_offset_drop(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_drop *in =
+		(struct ipahal_stats_get_offset_drop *)params;
+	int entries = _count_ones(in->init.enabled_bitmask);
+
+	IPAHAL_DBG_LOW("\n");
+	out->offset = 0;
+	out->size = entries * sizeof(struct ipahal_stats_drop_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_drop(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_drop *init =
+		(struct ipahal_stats_init_drop *)init_params;
+	struct ipahal_stats_drop_hw *raw_hw =
+		(struct ipahal_stats_drop_hw *)raw_stats;
+	struct ipahal_stats_drop_all *out =
+		(struct ipahal_stats_drop_all *)parsed_stats;
+	int stat_idx = 0;
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		if (init->enabled_bitmask & (1 << i)) {
+			out->stats[i].drop_byte_cnt =
+				raw_hw[stat_idx].drop_byte_cnt;
+			out->stats[i].drop_packet_cnt =
+				raw_hw[stat_idx].drop_packet_cnt;
+			stat_idx++;
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_hw_stats_obj
+	ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = {
+	/* IPAv4 */
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = {
+		ipahal_generate_init_pyld_quota,
+		ipahal_get_offset_quota,
+		ipahal_parse_stats_quota
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = {
+		ipahal_generate_init_pyld_tethering,
+		ipahal_get_offset_tethering,
+		ipahal_parse_stats_tethering
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = {
+		ipahal_generate_init_pyld_flt_rt,
+		ipahal_get_offset_flt_rt,
+		ipahal_parse_stats_flt_rt
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = {
+		ipahal_generate_init_pyld_drop,
+		ipahal_get_offset_drop,
+		ipahal_parse_stats_drop
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_QUOTA] = {
+		ipahal_generate_init_pyld_quota,
+		ipahal_get_offset_quota,
+		ipahal_parse_stats_quota
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_FNR] = {
+		ipahal_generate_init_pyld_flt_rt_v4_5,
+		ipahal_get_offset_flt_rt_v4_5,
+		ipahal_parse_stats_flt_rt_v4_5
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_TETHERING] = {
+		ipahal_generate_init_pyld_tethering,
+		ipahal_get_offset_tethering,
+		ipahal_parse_stats_tethering
+	},
+	[IPA_HW_v4_5][IPAHAL_HW_STATS_DROP] = {
+		ipahal_generate_init_pyld_drop,
+		ipahal_get_offset_drop,
+		ipahal_parse_stats_drop
+	},
+};
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_hw_stats_obj zero_obj;
+	struct ipahal_hw_stats_obj *hw_stat_ptr;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v4_0; i < ipa_hw_type; i++) {
+		for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) {
+			if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj,
+				sizeof(struct ipahal_hw_stats_obj))) {
+				memcpy(&ipahal_hw_stats_objs[i + 1][j],
+					&ipahal_hw_stats_objs[i][j],
+					sizeof(struct ipahal_hw_stats_obj));
+			} else {
+				/*
+				 * explicitly overridden stat.
+				 * Check validity
+				 */
+				hw_stat_ptr = &ipahal_hw_stats_objs[i + 1][j];
+				if (!hw_stat_ptr->get_offset) {
+					IPAHAL_ERR(
+					  "stat=%d get_offset null ver=%d\n",
+					  j, i+1);
+					WARN_ON(1);
+				}
+				if (!hw_stat_ptr->parse_stats) {
+					IPAHAL_ERR(
+					  "stat=%d parse_stats null ver=%d\n",
+						j, i + 1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+	struct ipahal_stats_offset *out)
+{
+	if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+		IPAHAL_ERR("Invalid type stat=%d\n", type);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (!params || !out) {
+		IPAHAL_ERR("Null arg\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset(
+		params, out);
+}
+
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+	enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx)
+{
+	struct ipahal_hw_stats_obj *hw_obj_ptr;
+
+	if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+		IPAHAL_ERR("Invalid type stat=%d\n", type);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	hw_obj_ptr = &ipahal_hw_stats_objs[ipahal_ctx->hw_type][type];
+	return hw_obj_ptr->generate_init_pyld(params, is_atomic_ctx);
+}
+
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+	void *raw_stats, void *parsed_stats)
+{
+	if (WARN((type < 0 || type >= IPAHAL_HW_STATS_MAX),
+		"Invalid type stat = %d\n", type))
+		return -EFAULT;
+
+	if (WARN((!raw_stats || !parsed_stats), "Null arg\n"))
+		return -EFAULT;
+
+	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats(
+		init_params, raw_stats, parsed_stats);
+}
+
+void ipahal_set_flt_rt_sw_stats(void *raw_stats,
+	struct ipa_flt_rt_stats sw_stats)
+{
+	struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
+
+	IPAHAL_DBG_LOW("\n");
+	raw_hw->num_bytes = sw_stats.num_bytes;
+	raw_hw->num_packets_hash = sw_stats.num_pkts_hash;
+	raw_hw->num_packets = sw_stats.num_pkts;
+}

+ 273 - 0
ipa/ipa_v3/ipahal/ipahal_hw_stats.h

@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_HW_STATS_H_
+#define _IPAHAL_HW_STATS_H_
+
+#include <linux/ipa.h>
+
+#define IPAHAL_MAX_PIPES 32
+#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */
+
+enum ipahal_hw_stats_type {
+	IPAHAL_HW_STATS_QUOTA,
+	IPAHAL_HW_STATS_TETHERING,
+	IPAHAL_HW_STATS_FNR,
+	IPAHAL_HW_STATS_DROP,
+	IPAHAL_HW_STATS_MAX
+};
+
+/*
+ * struct ipahal_stats_init_pyld - Statistics initialization payload
+ * @len: length of payload
+ * @reserved: reserved
+ * @data: actual payload data
+ */
+struct ipahal_stats_init_pyld {
+	u16 len;
+	u16 reserved;
+	u8 data[0];
+};
+
+/*
+ * struct ipahal_stats_offset - Statistics offset parameters
+ * @offset: offset of the statistic from beginning of stats table
+ * @size: size of the statistics
+ */
+struct ipahal_stats_offset {
+	u32 offset;
+	u16 size;
+};
+
+/*
+ * struct ipahal_stats_init_quota - Initialization parameters for quota
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_quota {
+	u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_quota - Get offset parameters for quota
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_quota {
+	struct ipahal_stats_init_quota init;
+};
+
+/*
+ * struct ipahal_stats_quota - Quota statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_quota {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u64 num_ipv4_pkts;
+	u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_quota_all - Quota statistics for all pipes
+ * @stats: array of statistics per pipe
+ */
+struct ipahal_stats_quota_all {
+	struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_tethering - Initialization parameters for tethering
+ * @prod_bitmask: bit mask of producer pipes to be monitored
+ * @cons_bitmask: bit mask of consumer pipes to be monitored per producer
+ */
+struct ipahal_stats_init_tethering {
+	u32 prod_bitmask;
+	u32 cons_bitmask[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_get_offset_tethering - Get offset parameters for
+ *	tethering
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_tethering {
+	struct ipahal_stats_init_tethering init;
+};
+
+/*
+ * struct ipahal_stats_tethering - Tethering statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_tethering {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u64 num_ipv4_pkts;
+	u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_tethering_all - Tethering statistics for all pipes
+ * @stats: matrix of statistics per pair of pipes
+ */
+struct ipahal_stats_tethering_all {
+	struct ipahal_stats_tethering
+		stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_flt_rt - Initialization parameters for flt_rt
+ * @rule_id_bitmask: array describes which rule ids to monitor.
+ *	rule_id bit is determined by:
+ *		index to the array => rule_id / 32
+ *		bit to enable => rule_id % 32
+ */
+struct ipahal_stats_init_flt_rt {
+	u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32];
+};
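+
+/*
+ * Example (illustrative sketch): enabling monitoring of rule_id 37 sets
+ * bit 5 (37 % 32) of word 1 (37 / 32):
+ *
+ *	init.rule_id_bitmask[37 / 32] |= 1 << (37 % 32);
+ */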
+
+/*
+ * struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt
+ * @init: initialization parameters used in initialization of stats
+ * @rule_id: rule_id to get the offset for
+ */
+struct ipahal_stats_get_offset_flt_rt {
+	struct ipahal_stats_init_flt_rt init;
+	u32 rule_id;
+};
+
+/*
+ * struct ipahal_stats_flt_rt - flt_rt statistics
+ * @num_packets: Total number of packets that hit this rule
+ * @num_packets_hash: Total number of packets that hit this rule in hash table
+ */
+struct ipahal_stats_flt_rt {
+	u32 num_packets;
+	u32 num_packets_hash;
+};
+
+/*
+ * struct ipahal_stats_flt_rt_v4_5 - flt_rt statistics
+ * @num_packets: Total number of packets that hit this rule
+ * @num_packets_hash: Total number of packets that hit this rule in hash table
+ * @num_bytes: Total number of bytes that hit this rule
+ */
+struct ipahal_stats_flt_rt_v4_5 {
+	u32 num_packets;
+	u32 num_packets_hash;
+	u64 num_bytes;
+};
+
+/*
+ * struct ipahal_stats_get_offset_flt_rt_v4_5 - Get offset parameters for flt_rt
+ * @start_id: start_id to get the offset
+ * @end_id: end_id to get the offset
+ */
+struct ipahal_stats_get_offset_flt_rt_v4_5 {
+	u8 start_id;
+	u8 end_id;
+};
+
+/*
+ * struct ipahal_stats_init_drop - Initialization parameters for Drop
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_drop {
+	u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_drop - Get offset parameters for Drop
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_drop {
+	struct ipahal_stats_init_drop init;
+};
+
+/*
+ * struct ipahal_stats_drop - Packet Drop statistics
+ * @drop_packet_cnt: number of packets dropped
+ * @drop_byte_cnt: number of bytes dropped
+ */
+struct ipahal_stats_drop {
+	u32 drop_packet_cnt;
+	u32 drop_byte_cnt;
+};
+
+/*
+ * struct ipahal_stats_drop_all - Drop statistics for all pipes
+ * @stats: array of statistics per pipes
+ */
+struct ipahal_stats_drop_all {
+	struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * ipahal_stats_generate_init_pyld - Generate the init payload for stats
+ * @type: type of stats
+ * @params: init_pyld parameters based on the stats type
+ * @is_atomic_ctx: is the calling context atomic?
+ *
+ * This function will generate the initialization payload for a particular
+ * statistic in hardware. The IPA driver is expected to use this payload to
+ * initialize the SRAM.
+ *
+ * Return: pointer to ipahal_stats_init_pyld on success or NULL on failure.
+ */
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+	enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx);
+
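+/*
+ * Typical flow, as a minimal sketch ('quota_params' and 'stats_mem' are
+ * hypothetical names; in practice the driver programs SRAM via immediate
+ * commands rather than a direct copy):
+ *
+ *	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
+ *		&quota_params, false);
+ *	if (pyld) {
+ *		memcpy(stats_mem, pyld->data, pyld->len);
+ *		ipahal_destroy_stats_init_pyld(pyld);
+ *	}
+ */
+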
+/*
+ * ipahal_destroy_stats_init_pyld() - Destroy/Release the payload that was
+ *  built by the ipahal_stats_generate_init_pyld() function.
+ */
+static inline void ipahal_destroy_stats_init_pyld(
+	struct ipahal_stats_init_pyld *pyld)
+{
+	kfree(pyld);
+}
+
+/*
+ * ipahal_stats_get_offset - Get the offset / size of payload for stats
+ * @type: type of stats
+ * @params: get_offset parameters based on the stats type
+ * @out: out parameter for the offset and size.
+ *
+ * This function will return the offset of the counter from the beginning of
+ * the table. The IPA driver is expected to read this portion of SRAM and pass
+ * it to ipahal_parse_stats() to interpret the stats.
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+	struct ipahal_stats_offset *out);
+
+/*
+ * ipahal_parse_stats - parse statistics
+ * @type: type of stats
+ * @init_params: init_pyld parameters used on init
+ * @raw_stats: stats read from IPA SRAM
+ * @parsed_stats: pointer to parsed stats based on type
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+	void *raw_stats, void *parsed_stats);
+
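+/*
+ * A minimal sketch tying the two calls together (hypothetical names,
+ * error handling omitted): locate the quota counters in the stats region
+ * and parse them:
+ *
+ *	struct ipahal_stats_get_offset_quota get = { .init = quota_params };
+ *	struct ipahal_stats_offset off;
+ *	struct ipahal_stats_quota_all all;
+ *
+ *	ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get, &off);
+ *	ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, &get.init,
+ *		stats_mem + off.offset, &all);
+ */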
+
+/*
+ * ipahal_set_flt_rt_sw_stats - set sw counter stats for FnR
+ * @raw_stats: stats write to IPA SRAM
+ * @sw_stats: FnR sw stats to be written
+ *
+ * Return: None
+ */
+void ipahal_set_flt_rt_sw_stats(void *raw_stats,
+	struct ipa_flt_rt_stats sw_stats);
+
+#endif /* _IPAHAL_HW_STATS_H_ */

+ 54 - 0
ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h

@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_HW_STATS_I_H_
+#define _IPAHAL_HW_STATS_I_H_
+
+#include "ipahal_hw_stats.h"
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type);
+
+struct ipahal_stats_quota_hw {
+	u64 num_ipv4_bytes;
+	u64 num_ipv4_pkts:32;
+	u64 num_ipv6_pkts:32;
+	u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_tethering_hdr_hw {
+	u64 dst_mask:32;
+	u64 offset:32;
+};
+
+struct ipahal_stats_tethering_hw {
+	u64 num_ipv4_bytes;
+	u64 num_ipv4_pkts:32;
+	u64 num_ipv6_pkts:32;
+	u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_flt_rt_hdr_hw {
+	u64 en_mask:32;
+	u64 reserved:16;
+	u64 cnt_offset:16;
+};
+
+struct ipahal_stats_flt_rt_hw {
+	u64 num_packets_hash:32;
+	u64 num_packets:32;
+};
+
+struct ipahal_stats_flt_rt_v4_5_hw {
+	u64 num_packets_hash:32;
+	u64 num_packets:32;
+	u64 num_bytes;
+};
+
+struct ipahal_stats_drop_hw {
+	u64 drop_byte_cnt:40;
+	u64 drop_packet_cnt:24;
+};
+
+#endif /* _IPAHAL_HW_STATS_I_H_ */

+ 815 - 0
ipa/ipa_v3/ipahal/ipahal_i.h

@@ -0,0 +1,815 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#include <linux/ipa.h>
+#include "../../ipa_common_i.h"
+
+#define IPAHAL_DRV_NAME "ipahal"
+
+#define IPAHAL_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_ERR(fmt, args...) \
+	do { \
+		pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_ERR_RL(fmt, args...) \
+		do { \
+			pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+			IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		} while (0)
+
+#define IPAHAL_DBG_REG(fmt, args...) \
+	do { \
+		pr_err(fmt, ## args); \
+		IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, \
+			" %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \
+		IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, " %s:%d " fmt, ## args)
+
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+	(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
+
+#define IPAHAL_IPC_LOG_PAGES 50
+
+#define IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID 0x3ff
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ *  I/O memory mapped address.
+ * @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
+ * @regdumpbuf: IPC log buffer used for register-dump logging
+ */
+struct ipahal_context {
+	enum ipa_hw_type hw_type;
+	void __iomem *base;
+	struct dentry *dent;
+	struct device *ipa_pdev;
+	struct ipa_mem_buffer empty_fltrt_tbl;
+	void *regdumpbuf;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ *  table starts. IPv4 NAT rules that result in hash collision are located
+ *  in this table.
+ * @index_table_addr: Addr in sys/shared mem where the index table, which
+ *  points to the NAT table, starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ *  sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ *  sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ *  idx tbl (each)
+ * @rsvd2: reserved
+ * @public_addr_info: Public IP addresses info suitable to the IPA H/W version
+ *                    IPA H/W >= 4.0 - PDN config table offset in SMEM
+ *                    IPA H/W < 4.0  - The public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+	u64 ipv4_rules_addr:64;
+	u64 ipv4_expansion_rules_addr:64;
+	u64 index_table_addr:64;
+	u64 index_table_expansion_addr:64;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_addr_info:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT command payload
+ *  in H/W format.
+ * Inits IPv6CT block. Initiates the IPv6CT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @table_addr: Address in sys/shared mem where IPv6CT rules start
+ * @expansion_table_addr: Address in sys/shared mem where IPv6CT expansion
+ *  table starts. IPv6CT rules that result in hash collision are located
+ *  in this table.
+ * @table_index: For future support of multiple IPv6CT tables
+ * @rsvd1: reserved
+ * @table_addr_type: table_addr in sys or shared mem
+ * @expansion_table_addr_type: expansion_table_addr in sys or shared mem
+ * @rsvd2: reserved
+ * @size_base_table: Number of entries in IPv6CT table
+ * @size_expansion_table: Number of entries in IPv6CT expansion table
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_ip_v6_ct_init {
+	u64 table_addr:64;
+	u64 expansion_table_addr:64;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 table_addr_type:1;
+	u64 expansion_table_addr_type:1;
+	u64 rsvd2:2;
+	u64 size_base_table:12;
+	u64 size_expansion_table:10;
+	u64 rsvd3:34;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ *  in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+	u64 hdr_table_addr:64;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ *  in H/W format
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ *  different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_table_dma_ipav4 - TABLE_DMA command payload
+ *  in H/W format
+ * Perform DMA operation on NAT and ipv6 connection tracking related mem
+ * addresses. Copy data into different locations within NAT associated tbls
+ * (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_table_dma_ipav4 {
+	u64 table_index : 3;
+	u64 rsvd1 : 1;
+	u64 base_addr : 3;
+	u64 rsvd2 : 1;
+	u64 offset : 32;
+	u64 data : 16;
+	u64 rsvd3 : 8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ *  in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+	u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ *  in H/W format.
+ * Configuration for a specific IP pkt. Shall be sent prior to an IP pkt's
+ *  data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ * @rsv1: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ *  in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packets
+ *  and other immediate commands. Can be used to access the SRAM
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+	u64 sw_rsvd:15;
+	u64 skip_pipeline_clear:1;
+	u64 offset:16;
+	u64 value:32;
+	u64 value_mask:32;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:30;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write_v_4_0 - REGISTER_WRITE command payload
+ *  in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packets
+ *  and other immediate commands. Can be used to access the SRAM
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @offset_high: high bits of the Offset field - bits 17-20
+ * @rsvd: reserved - should be set to zero
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @rsvd2: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write_v_4_0 {
+	u64 sw_rsvd:11;
+	u64 offset_high:4;
+	u64 rsvd:1;
+	u64 offset:16;
+	u64 value:32;
+	u64 value_mask:32;
+	u64 rsvd2:32;
+};
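+
+/*
+ * Illustrative sketch: a 20-bit register offset 'ofst' splits across the
+ * two fields above:
+ *
+ *	cmd.offset = ofst & 0xffff;
+ *	cmd.offset_high = (ofst >> 16) & 0xf;
+ */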
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ *  in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ *	0: IPA write, Write to local address from system address
+ *	1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+	u64 sw_rsvd:16;
+	u64 size:16;
+	u64 local_addr:16;
+	u64 direction:1;
+	u64 skip_pipeline_clear:1;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:12;
+	u64 system_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 - DMA_SHARED_MEM command payload
+ *  in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @clear_after_read: Clear local memory at the end of a read operation;
+ *  allows an atomic read-and-clear if HPS is clear. Ignored for writes.
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ *	0: IPA write, Write to local address from system address
+ *	1: IPA read, Read from local address to system address
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 {
+	u64 sw_rsvd:15;
+	u64 clear_after_read:1;
+	u64 size:16;
+	u64 local_addr:16;
+	u64 direction:1;
+	u64 rsvd:15;
+	u64 system_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ *  IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used to allow SW to track H/W processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+	u64 sw_rsvd:16;
+	u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ *	IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The Opcode is dynamic, where it holds the number of buffers to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
+ *  dest client. This is used for aggr sequence
+ * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but
+ *  will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ *  servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+	u64 sw_rsvd:11;
+	u64 cmplt:1;
+	u64 eof:1;
+	u64 flsh:1;
+	u64 lock:1;
+	u64 unlock:1;
+	u64 size1:16;
+	u64 addr1:32;
+	u64 packet_size:16;
+};
+
+
+
+/* IPA Status packet H/W structures and info */
+
+/*
+ * struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
+ *  This structure describes the status packet H/W structure for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: (not bitmask) - the first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
+ * @pkt_len: Pkt pyld len including hdr, includes retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @rsvd1: reserved
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @rsvd2: reserved
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ *  Starting IPA4.5, this will be true only if the packet has an L2 header.
+ * @flt_rule_id: The ID of the matching filter rule. This info can be combined
+ *  with endp_src_idx to locate the exact rule. ID=0x3FF is reserved to
+ *  specify a flt miss. In case of a miss, all flt info is to be ignored
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  an rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag.
+ * @rt_tbl_idx: Index of the rt tbl that contains the matching rule
+ * @rt_rule_id: The ID of the matching rt rule. This info can be combined
+ *  with rt_tbl_idx to locate the exact rule. ID=0x3FF is reserved to
+ *  specify an rt miss. In case of a miss, all rt info is to be ignored
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ *	00: No NAT
+ *	01: Source NAT
+ *	10: Destination NAT
+ *	11: Reserved
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system
+ *  mem)
+ * @hdr_offset: Offset of the used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @hw_specific: H/W specific reserved value
+ */
+struct ipa_gen_pkt_status_hw {
+	u64 status_opcode:8;
+	u64 exception:8;
+	u64 status_mask:16;
+	u64 pkt_len:16;
+	u64 endp_src_idx:5;
+	u64 rsvd1:3;
+	u64 endp_dest_idx:5;
+	u64 rsvd2:3;
+	u64 metadata:32;
+	u64 flt_local:1;
+	u64 flt_hash:1;
+	u64 flt_global:1;
+	u64 flt_ret_hdr:1;
+	u64 flt_rule_id:10;
+	u64 rt_local:1;
+	u64 rt_hash:1;
+	u64 ucp:1;
+	u64 rt_tbl_idx:5;
+	u64 rt_rule_id:10;
+	u64 nat_hit:1;
+	u64 nat_entry_idx:13;
+	u64 nat_type:2;
+	u64 tag_info:48;
+	u64 seq_num:8;
+	u64 time_of_day_ctr:24;
+	u64 hdr_local:1;
+	u64 hdr_offset:10;
+	u64 frag_hit:1;
+	u64 frag_rule:4;
+	u64 hw_specific:16;
+} __packed;
+
+/*
+ * struct ipa_frag_pkt_status_hw - IPA status packet payload in H/W format.
+ *  This structure describes the frag status packet H/W structure for the
+ *   following statuses: IPA_NEW_FRAG_RULE.
+ * @status_opcode: The Type of the status (Opcode).
+ * @frag_rule_idx: Frag rule index value.
+ * @reserved_1: reserved
+ * @tbl_idx: Table index valid or not.
+ * @endp_src_idx: Source end point index.
+ * @exception: (not bitmask) - the first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @reserved_2: reserved
+ * @seq_num: Packet sequence number.
+ * @src_ip_addr: Source packet IP address.
+ * @dest_ip_addr: Destination packet IP address.
+ * @reserved_3: reserved
+ * @nat_type: Defines the type of the NAT operation:
+ *	00: No NAT
+ *	01: Source NAT
+ *	10: Destination NAT
+ *	11: Reserved
+ * @protocol: Protocol number.
+ * @ip_id: IP packet IP ID number.
+ * @tlated_ip_addr: Translated IP address.
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system
+ *  mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @endp_dest_idx: Destination end point index.
+ * @ip_cksum_diff: IP packet checksum difference.
+ * @metadata: meta data value used by packet
+ * @reserved_4: reserved
+ */
+struct ipa_frag_pkt_status_hw {
+	u64 status_opcode:8;
+	u64 frag_rule_idx:4;
+	u64 reserved_1:3;
+	u64 tbl_idx:1;
+	u64 endp_src_idx:5;
+	u64 exception:1;
+	u64 reserved_2:2;
+	u64 seq_num:8;
+	u64 src_ip_addr:32;
+	u64 dest_ip_addr:32;
+	u64 reserved_3:6;
+	u64 nat_type:2;
+	u64 protocol:8;
+	u64 ip_id:16;
+	u64 tlated_ip_addr:32;
+	u64 hdr_local:1;
+	u64 hdr_offset:10;
+	u64 endp_dest_idx:5;
+	u64 ip_cksum_diff:16;
+	u64 metadata:32;
+	u64 reserved_4:32;
+} __packed;
+
+union ipa_pkt_status_hw {
+	struct ipa_gen_pkt_status_hw ipa_pkt;
+	struct ipa_frag_pkt_status_hw frag_pkt;
+} __packed;
+
+/* Size of H/W Packet Status */
+#define IPA3_0_PKT_STATUS_SIZE 32
+
+/* Headers and processing context H/W structures and definitions */
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3		6
+#define IPA_HDR_UCP_802_3_TO_ETHII		7
+#define IPA_HDR_UCP_ETHII_TO_802_3		8
+#define IPA_HDR_UCP_ETHII_TO_ETHII		9
+#define IPA_HDR_UCP_L2TP_HEADER_ADD		10
+#define IPA_HDR_UCP_L2TP_HEADER_REMOVE		11
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_ADD	12
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_REMOVE	13
+#define IPA_HDR_UCP_ETHII_TO_ETHII_EX		14
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ *        1 - header addition type
+ *        3 - processing command type
+ * @length: number of bytes after tlv
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header addition length
+ *        3 - number of 32B including type and length.
+ * @value: specific value for type
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header length
+ *        3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hw_hdr_proc_ctx_tlv {
+	u32 type:8;
+	u32 length:8;
+	u32 value:16;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hw_hdr_proc_ctx_hdr_add {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	u32 hdr_addr;
+	u32 hdr_addr_hi;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr -
+ * HW structure of IPA processing context - add l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_l2tp_header_add_procparams l2tp_params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr -
+ * HW structure of IPA processing context - remove l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_l2tp_header_remove_procparams l2tp_params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv cmd;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
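+
+/*
+ * Illustrative sketch (not part of the driver): populating the command
+ * sequence above per the TLV rules documented for
+ * struct ipa_hw_hdr_proc_ctx_tlv. The helper name, the choice of
+ * IPA_HDR_UCP_ETHII_TO_ETHII and the zeroed hdr_addr_hi are example
+ * assumptions.
+ */
+static inline void ipa_example_fill_hdr_cmd_seq(
+	struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx,
+	u32 hdr_addr, u16 hdr_len)
+{
+	ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+	/* bytes following the TLV itself */
+	ctx->hdr_add.tlv.length = sizeof(ctx->hdr_add) -
+		sizeof(ctx->hdr_add.tlv);
+	ctx->hdr_add.tlv.value = hdr_len;	/* header length */
+	ctx->hdr_add.hdr_addr = hdr_addr;
+	ctx->hdr_add.hdr_addr_hi = 0;
+
+	ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+	ctx->cmd.length = 0;
+	ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;	/* command ID */
+
+	ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;	/* closes the sequence */
+	ctx->end.length = 0;
+	ctx->end.value = 0;
+}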
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header addition
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header removal
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_ex -
+ * HW structure of IPA processing context - add generic header
+ * @tlv: IPA processing context TLV
+ * @params: generic eth2 to eth2 parameters
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_ex {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	struct ipa_eth_II_to_eth_II_ex_procparams params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @params: params for header generic header add
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_add_hdr_ex hdr_add_ex;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+#endif /* _IPAHAL_I_H_ */

+ 510 - 0
ipa/ipa_v3/ipahal/ipahal_nat.c

@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal_nat.h"
+#include "ipahal_nat_i.h"
+#include "ipahal_i.h"
+
+#define IPA_64_LOW_32_MASK (0xFFFFFFFF)
+#define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL)
+
+static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = {
+	__stringify(IPAHAL_NAT_IPV4),
+	__stringify(IPAHAL_NAT_IPV4_INDEX),
+	__stringify(IPAHAL_NAT_IPV4_PDN),
+	__stringify(IPAHAL_NAT_IPV6CT)
+};
+
+static size_t ipa_nat_ipv4_entry_size_v_3_0(void)
+{
+	return sizeof(struct ipa_nat_hw_ipv4_entry);
+}
+
+static size_t ipa_nat_ipv4_index_entry_size_v_3_0(void)
+{
+	return sizeof(struct ipa_nat_hw_indx_entry);
+}
+
+static size_t ipa_nat_ipv4_pdn_entry_size_v_4_0(void)
+{
+	return sizeof(struct ipa_nat_hw_pdn_entry);
+}
+
+static size_t ipa_nat_ipv6ct_entry_size_v_4_0(void)
+{
+	return sizeof(struct ipa_nat_hw_ipv6ct_entry);
+}
+
+static bool ipa_nat_ipv4_is_entry_zeroed_v_3_0(const void *entry)
+{
+	struct ipa_nat_hw_ipv4_entry zero_entry = { 0 };
+
+	return !memcmp(&zero_entry, entry, sizeof(zero_entry));
+}
+
+static bool ipa_nat_ipv4_is_index_entry_zeroed_v_3_0(const void *entry)
+{
+	struct ipa_nat_hw_indx_entry zero_entry = { 0 };
+
+	return !memcmp(&zero_entry, entry, sizeof(zero_entry));
+}
+
+static bool ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0(const void *entry)
+{
+	struct ipa_nat_hw_pdn_entry zero_entry = { 0 };
+
+	return !memcmp(&zero_entry, entry, sizeof(zero_entry));
+}
+
+static bool ipa_nat_ipv6ct_is_entry_zeroed_v_4_0(const void *entry)
+{
+	struct ipa_nat_hw_ipv6ct_entry zero_entry = { 0 };
+
+	return !memcmp(&zero_entry, entry, sizeof(zero_entry));
+}
+
+static bool ipa_nat_ipv4_is_entry_valid_v_3_0(const void *entry)
+{
+	struct ipa_nat_hw_ipv4_entry *hw_entry =
+		(struct ipa_nat_hw_ipv4_entry *)entry;
+
+	return hw_entry->enable &&
+		hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL;
+}
+
+static bool ipa_nat_ipv4_is_index_entry_valid_v_3_0(const void *entry)
+{
+	struct ipa_nat_hw_indx_entry *hw_entry =
+		(struct ipa_nat_hw_indx_entry *)entry;
+
+	return hw_entry->tbl_entry != 0;
+}
+
+static bool ipa_nat_ipv4_is_pdn_entry_valid_v_4_0(const void *entry)
+{
+	struct ipa_nat_hw_pdn_entry *hw_entry =
+		(struct ipa_nat_hw_pdn_entry *)entry;
+
+	return hw_entry->public_ip != 0;
+}
+
+static bool ipa_nat_ipv6ct_is_entry_valid_v_4_0(const void *entry)
+{
+	struct ipa_nat_hw_ipv6ct_entry *hw_entry =
+		(struct ipa_nat_hw_ipv6ct_entry *)entry;
+
+	return hw_entry->enable &&
+		hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL;
+}
+
+static int ipa_nat_ipv4_stringify_entry_v_3_0(const void *entry,
+	char *buff, size_t buff_size)
+{
+	const struct ipa_nat_hw_ipv4_entry *nat_entry =
+		(const struct ipa_nat_hw_ipv4_entry *)entry;
+
+	return scnprintf(buff, buff_size,
+		"\t\tPrivate_IP=%pI4h  Target_IP=%pI4h\n"
+		"\t\tNext_Index=%d  Public_Port=%d\n"
+		"\t\tPrivate_Port=%d  Target_Port=%d\n"
+		"\t\tIP_CKSM_delta=0x%x  Enable=%s  Redirect=%s\n"
+		"\t\tTime_stamp=0x%x Proto=%d\n"
+		"\t\tPrev_Index=%d  Indx_tbl_entry=%d\n"
+		"\t\tTCP_UDP_cksum_delta=0x%x\n",
+		&nat_entry->private_ip, &nat_entry->target_ip,
+		nat_entry->next_index, nat_entry->public_port,
+		nat_entry->private_port, nat_entry->target_port,
+		nat_entry->ip_chksum,
+		(nat_entry->enable) ? "true" : "false",
+		(nat_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
+		nat_entry->time_stamp, nat_entry->protocol,
+		nat_entry->prev_index, nat_entry->indx_tbl_entry,
+		nat_entry->tcp_udp_chksum);
+}
+
+static int ipa_nat_ipv4_stringify_entry_v_4_0(const void *entry,
+	char *buff, size_t buff_size)
+{
+	int length;
+	const struct ipa_nat_hw_ipv4_entry *nat_entry =
+		(const struct ipa_nat_hw_ipv4_entry *)entry;
+
+	length = ipa_nat_ipv4_stringify_entry_v_3_0(entry, buff, buff_size);
+
+	length += scnprintf(buff + length, buff_size - length,
+		"\t\tPDN_Index=%d\n", nat_entry->pdn_index);
+
+	return length;
+}
+
+static int ipa_nat_ipv4_index_stringify_entry_v_3_0(const void *entry,
+	char *buff, size_t buff_size)
+{
+	const struct ipa_nat_hw_indx_entry *index_entry =
+		(const struct ipa_nat_hw_indx_entry *)entry;
+
+	return scnprintf(buff, buff_size,
+		"\t\tTable_Entry=%d  Next_Index=%d\n",
+		index_entry->tbl_entry, index_entry->next_index);
+}
+
+static int ipa_nat_ipv4_pdn_stringify_entry_v_4_0(const void *entry,
+	char *buff, size_t buff_size)
+{
+	const struct ipa_nat_hw_pdn_entry *pdn_entry =
+		(const struct ipa_nat_hw_pdn_entry *)entry;
+
+	return scnprintf(buff, buff_size,
+		"ip=%pI4h src_metadata=0x%X, dst_metadata=0x%X\n",
+		&pdn_entry->public_ip,
+		pdn_entry->src_metadata, pdn_entry->dst_metadata);
+}
+
+static inline int ipa_nat_ipv6_stringify_addr(char *buff, size_t buff_size,
+	const char *msg, u64 lsb, u64 msb)
+{
+	struct in6_addr addr;
+
+	addr.s6_addr32[0] = cpu_to_be32((msb & IPA_64_HIGH_32_MASK) >> 32);
+	addr.s6_addr32[1] = cpu_to_be32(msb & IPA_64_LOW_32_MASK);
+	addr.s6_addr32[2] = cpu_to_be32((lsb & IPA_64_HIGH_32_MASK) >> 32);
+	addr.s6_addr32[3] = cpu_to_be32(lsb & IPA_64_LOW_32_MASK);
+
+	return scnprintf(buff, buff_size,
+		"\t\t%s_IPv6_Addr=%pI6c\n", msg, &addr);
+}
+
+static int ipa_nat_ipv6ct_stringify_entry_v_4_0(const void *entry,
+	char *buff, size_t buff_size)
+{
+	int length = 0;
+	const struct ipa_nat_hw_ipv6ct_entry *ipv6ct_entry =
+		(const struct ipa_nat_hw_ipv6ct_entry *)entry;
+
+	length += ipa_nat_ipv6_stringify_addr(
+		buff + length,
+		buff_size - length,
+		"Src",
+		ipv6ct_entry->src_ipv6_lsb,
+		ipv6ct_entry->src_ipv6_msb);
+
+	length += ipa_nat_ipv6_stringify_addr(
+		buff + length,
+		buff_size - length,
+		"Dest",
+		ipv6ct_entry->dest_ipv6_lsb,
+		ipv6ct_entry->dest_ipv6_msb);
+
+	length += scnprintf(buff + length, buff_size - length,
+		"\t\tEnable=%s Redirect=%s Time_Stamp=0x%x Proto=%d\n"
+		"\t\tNext_Index=%d Dest_Port=%d Src_Port=%d\n"
+		"\t\tDirection Settings: Out=%s In=%s\n"
+		"\t\tPrev_Index=%d\n",
+		(ipv6ct_entry->enable) ? "true" : "false",
+		(ipv6ct_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
+		ipv6ct_entry->time_stamp,
+		ipv6ct_entry->protocol,
+		ipv6ct_entry->next_index,
+		ipv6ct_entry->dest_port,
+		ipv6ct_entry->src_port,
+		(ipv6ct_entry->out_allowed) ? "Allow" : "Deny",
+		(ipv6ct_entry->in_allowed) ? "Allow" : "Deny",
+		ipv6ct_entry->prev_index);
+
+	return length;
+}
+
+static void ipa_nat_ipv4_pdn_construct_entry_v_4_0(const void *fields,
+	u32 *address)
+{
+	const struct ipahal_nat_pdn_entry *pdn_entry =
+		(const struct ipahal_nat_pdn_entry *)fields;
+
+	struct ipa_nat_hw_pdn_entry *pdn_entry_address =
+		(struct ipa_nat_hw_pdn_entry *)address;
+
+	memset(pdn_entry_address, 0, sizeof(struct ipa_nat_hw_pdn_entry));
+
+	pdn_entry_address->public_ip = pdn_entry->public_ip;
+	pdn_entry_address->src_metadata = pdn_entry->src_metadata;
+	pdn_entry_address->dst_metadata = pdn_entry->dst_metadata;
+}
+
+static void ipa_nat_ipv4_pdn_parse_entry_v_4_0(void *fields,
+	const u32 *address)
+{
+	struct ipahal_nat_pdn_entry *pdn_entry =
+		(struct ipahal_nat_pdn_entry *)fields;
+
+	const struct ipa_nat_hw_pdn_entry *pdn_entry_address =
+		(const struct ipa_nat_hw_pdn_entry *)address;
+
+	pdn_entry->public_ip = pdn_entry_address->public_ip;
+	pdn_entry->src_metadata = pdn_entry_address->src_metadata;
+	pdn_entry->dst_metadata = pdn_entry_address->dst_metadata;
+}
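+
+/*
+ * Round-trip sketch (illustrative only, not driver code): constructing
+ * a PDN entry into a raw word buffer and parsing it back reproduces the
+ * fields. The helper name and the example values are assumptions.
+ */
+static inline void ipa_example_pdn_roundtrip(void)
+{
+	struct ipahal_nat_pdn_entry in = {
+		.public_ip = 0xC0A80001,	/* 192.168.0.1 (example) */
+		.src_metadata = 0x11,
+		.dst_metadata = 0x22,
+	};
+	struct ipahal_nat_pdn_entry out;
+	u32 raw[4];	/* 16B, the size of struct ipa_nat_hw_pdn_entry */
+
+	ipa_nat_ipv4_pdn_construct_entry_v_4_0(&in, raw);
+	ipa_nat_ipv4_pdn_parse_entry_v_4_0(&out, raw);
+	/* out.public_ip/src_metadata/dst_metadata now equal the inputs */
+}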
+
+/*
+ * struct ipahal_nat_obj - H/W information for a specific IPA version
+ * @entry_size - CB to get the size of the entry
+ * @is_entry_zeroed - CB to determine whether an entry is definitely zero
+ * @is_entry_valid - CB to determine whether an entry is valid
+ *  The validity criterion depends on the entry type, e.g. a NAT base
+ *   table entry must have a valid protocol and be enabled.
+ * @stringify_entry - CB to create a string that represents an entry
+ * @construct_entry - CB to create a NAT entry using the given fields
+ * @parse_entry - CB to parse a NAT entry into the given fields structure
+ */
+struct ipahal_nat_obj {
+	size_t (*entry_size)(void);
+	bool (*is_entry_zeroed)(const void *entry);
+	bool (*is_entry_valid)(const void *entry);
+	int (*stringify_entry)(const void *entry, char *buff, size_t buff_size);
+	void (*construct_entry)(const void *fields, u32 *address);
+	void (*parse_entry)(void *fields, const u32 *address);
+};
+
+/*
+ * This table contains the information regarding each NAT type for IPAv3
+ * and later, such as the entry-size and stringify-entry functions.
+ * All the information for all NAT types on IPAv3 is statically defined
+ * below. If information is missing for some NAT type on some IPA
+ * version, the init function fills it in with the information from the
+ * previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_nat_obj ipahal_nat_objs[IPA_HW_MAX][IPA_NAT_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPAHAL_NAT_IPV4] = {
+			ipa_nat_ipv4_entry_size_v_3_0,
+			ipa_nat_ipv4_is_entry_zeroed_v_3_0,
+			ipa_nat_ipv4_is_entry_valid_v_3_0,
+			ipa_nat_ipv4_stringify_entry_v_3_0
+		},
+	[IPA_HW_v3_0][IPAHAL_NAT_IPV4_INDEX] = {
+			ipa_nat_ipv4_index_entry_size_v_3_0,
+			ipa_nat_ipv4_is_index_entry_zeroed_v_3_0,
+			ipa_nat_ipv4_is_index_entry_valid_v_3_0,
+			ipa_nat_ipv4_index_stringify_entry_v_3_0
+		},
+
+	/* IPAv4 */
+	[IPA_HW_v4_0][IPAHAL_NAT_IPV4] = {
+			ipa_nat_ipv4_entry_size_v_3_0,
+			ipa_nat_ipv4_is_entry_zeroed_v_3_0,
+			ipa_nat_ipv4_is_entry_valid_v_3_0,
+			ipa_nat_ipv4_stringify_entry_v_4_0
+		},
+	[IPA_HW_v4_0][IPAHAL_NAT_IPV4_PDN] = {
+			ipa_nat_ipv4_pdn_entry_size_v_4_0,
+			ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0,
+			ipa_nat_ipv4_is_pdn_entry_valid_v_4_0,
+			ipa_nat_ipv4_pdn_stringify_entry_v_4_0,
+			ipa_nat_ipv4_pdn_construct_entry_v_4_0,
+			ipa_nat_ipv4_pdn_parse_entry_v_4_0
+		},
+	[IPA_HW_v4_0][IPAHAL_NAT_IPV6CT] = {
+			ipa_nat_ipv6ct_entry_size_v_4_0,
+			ipa_nat_ipv6ct_is_entry_zeroed_v_4_0,
+			ipa_nat_ipv6ct_is_entry_valid_v_4_0,
+			ipa_nat_ipv6ct_stringify_entry_v_4_0
+		}
+};
+
+static void ipahal_nat_check_obj(struct ipahal_nat_obj *obj,
+	int nat_type, int ver)
+{
+	WARN(obj->entry_size == NULL, "%s missing entry_size for version %d\n",
+		ipahal_nat_type_str(nat_type), ver);
+	WARN(obj->is_entry_zeroed == NULL,
+		"%s missing is_entry_zeroed for version %d\n",
+		ipahal_nat_type_str(nat_type), ver);
+	WARN(obj->is_entry_valid == NULL,
+		"%s missing is_entry_valid for version %d\n",
+		ipahal_nat_type_str(nat_type), ver);
+	WARN(obj->stringify_entry == NULL,
+		"%s missing stringify_entry for version %d\n",
+		ipahal_nat_type_str(nat_type), ver);
+}
+
+/*
+ * ipahal_nat_init() - Build the NAT information table
+ *  See ipahal_nat_objs[][] comments
+ */
+int ipahal_nat_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_nat_obj zero_obj, *next_obj;
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	for (i = IPA_HW_v3_0; i < ipa_hw_type; ++i) {
+		for (j = 0; j < IPA_NAT_MAX; ++j) {
+			next_obj = &ipahal_nat_objs[i + 1][j];
+			if (!memcmp(next_obj, &zero_obj, sizeof(*next_obj))) {
+				memcpy(next_obj, &ipahal_nat_objs[i][j],
+					sizeof(*next_obj));
+			} else {
+				ipahal_nat_check_obj(next_obj, j, i + 1);
+			}
+		}
+	}
+
+	return 0;
+}
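+
+/*
+ * Worked example of the fill-forward rule above (an illustration, not
+ * driver code): only the IPA_HW_v3_0 and IPA_HW_v4_0 rows are
+ * statically defined for IPAHAL_NAT_IPV4, so after ipahal_nat_init()
+ * runs for a newer target, each intermediate row is a copy of its
+ * predecessor and the check below holds. Assumes an IPA_HW_v4_1 value
+ * exists in enum ipa_hw_type.
+ */
+static inline void ipahal_nat_example_inherit_check(void)
+{
+	WARN_ON(memcmp(&ipahal_nat_objs[IPA_HW_v4_0][IPAHAL_NAT_IPV4],
+		&ipahal_nat_objs[IPA_HW_v4_1][IPAHAL_NAT_IPV4],
+		sizeof(struct ipahal_nat_obj)) != 0);
+}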
+
+const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type)
+{
+	if (nat_type < 0 || nat_type >= IPA_NAT_MAX) {
+		IPAHAL_ERR("requested NAT type %d is invalid\n", nat_type);
+		return "Invalid NAT type";
+	}
+
+	return ipahal_nat_type_to_str[nat_type];
+}
+
+int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size)
+{
+	if (WARN(entry_size == NULL, "entry_size is NULL\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	IPAHAL_DBG("Get the entry size for NAT type=%s\n",
+		ipahal_nat_type_str(nat_type));
+
+	*entry_size =
+		ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].entry_size();
+
+	IPAHAL_DBG("The entry size is %zu\n", *entry_size);
+
+	return 0;
+}
+
+int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry,
+	bool *entry_zeroed)
+{
+	struct ipahal_nat_obj *nat_ptr;
+
+	if (WARN(entry == NULL || entry_zeroed == NULL,
+		"NULL pointer received\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	IPAHAL_DBG("Determine whether the entry is zeroed for NAT type=%s\n",
+		ipahal_nat_type_str(nat_type));
+
+	nat_ptr =
+		&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
+
+	*entry_zeroed = nat_ptr->is_entry_zeroed(entry);
+
+	IPAHAL_DBG("The entry is %szeroed\n", (*entry_zeroed) ? "" : "not ");
+
+	return 0;
+}
+
+int ipahal_nat_is_entry_valid(enum ipahal_nat_type nat_type, void *entry,
+	bool *entry_valid)
+{
+	struct ipahal_nat_obj *nat_obj;
+
+	if (WARN(entry == NULL || entry_valid == NULL,
+		"NULL pointer received\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	IPAHAL_DBG("Determine whether the entry is valid for NAT type=%s\n",
+		ipahal_nat_type_str(nat_type));
+	nat_obj = &ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
+	*entry_valid = nat_obj->is_entry_valid(entry);
+	IPAHAL_DBG("The entry is %svalid\n", (*entry_valid) ? "" : "not ");
+
+	return 0;
+}
+
+int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry,
+	char *buff, size_t buff_size)
+{
+	int result;
+	struct ipahal_nat_obj *nat_obj_ptr;
+
+	if (WARN(entry == NULL || buff == NULL, "NULL pointer received\n"))
+		return -EINVAL;
+	if (WARN(!buff_size, "The output buff size is zero\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	nat_obj_ptr =
+		&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
+
+	IPAHAL_DBG("Create the string for the entry of NAT type=%s\n",
+		ipahal_nat_type_str(nat_type));
+
+	result = nat_obj_ptr->stringify_entry(entry, buff, buff_size);
+
+	IPAHAL_DBG("The string successfully created with length %d\n",
+		result);
+
+	return result;
+}
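+
+/*
+ * Usage sketch (a hypothetical caller such as a debugfs dump routine,
+ * not part of this file): skip empty slots, then stringify. The helper
+ * name is an example.
+ */
+static inline int ipa_example_dump_nat_entry(enum ipahal_nat_type type,
+	void *entry, char *buff, size_t buff_size)
+{
+	bool zeroed = true;
+
+	if (ipahal_nat_is_entry_zeroed(type, entry, &zeroed) || zeroed)
+		return 0;	/* nothing to print for an empty slot */
+
+	return ipahal_nat_stringify_entry(type, entry, buff, buff_size);
+}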
+
+int ipahal_nat_construct_entry(enum ipahal_nat_type nat_type,
+	const void *fields,
+	void *address)
+{
+	struct ipahal_nat_obj *nat_obj_ptr;
+
+	if (WARN(address == NULL || fields == NULL, "NULL pointer received\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	IPAHAL_DBG("Create %s entry using given fields\n",
+		ipahal_nat_type_str(nat_type));
+
+	nat_obj_ptr =
+		&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
+
+	/* only some NAT types (e.g. PDN) provide a construct CB */
+	if (WARN(nat_obj_ptr->construct_entry == NULL,
+		"no construct_entry CB for NAT type %s\n",
+		ipahal_nat_type_str(nat_type)))
+		return -EINVAL;
+
+	nat_obj_ptr->construct_entry(fields, address);
+
+	return 0;
+}
+
+int ipahal_nat_parse_entry(enum ipahal_nat_type nat_type, void *fields,
+	const void *address)
+{
+	struct ipahal_nat_obj *nat_obj_ptr;
+
+	if (WARN(address == NULL || fields == NULL, "NULL pointer received\n"))
+		return -EINVAL;
+	if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+		"requested NAT type %d is invalid\n", nat_type))
+		return -EINVAL;
+
+	IPAHAL_DBG("Get the parsed values for NAT type=%s\n",
+		ipahal_nat_type_str(nat_type));
+
+	nat_obj_ptr =
+		&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
+
+	/* only some NAT types (e.g. PDN) provide a parse CB */
+	if (WARN(nat_obj_ptr->parse_entry == NULL,
+		"no parse_entry CB for NAT type %s\n",
+		ipahal_nat_type_str(nat_type)))
+		return -EINVAL;
+
+	nat_obj_ptr->parse_entry(fields, address);
+
+	return 0;
+}

+ 103 - 0
ipa/ipa_v3/ipahal/ipahal_nat.h

@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_NAT_H_
+#define _IPAHAL_NAT_H_
+
+/*
+ * NAT types
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ *	  ipahal_nat_type_to_str array as well.
+ */
+enum ipahal_nat_type {
+	IPAHAL_NAT_IPV4,
+	IPAHAL_NAT_IPV4_INDEX,
+	IPAHAL_NAT_IPV4_PDN,
+	IPAHAL_NAT_IPV6CT,
+	IPA_NAT_MAX
+};
+
+/**
+ * struct ipahal_nat_pdn_entry - IPA PDN config table entry
+ * @public_ip: the PDN's public ip
+ * @src_metadata: the PDN's metadata to be replaced for source NAT
+ * @dst_metadata: the PDN's metadata to be replaced for destination NAT
+ */
+struct ipahal_nat_pdn_entry {
+	u32 public_ip;
+	u32 src_metadata;
+	u32 dst_metadata;
+};
+
+/* NAT Function APIs */
+
+/*
+ * ipahal_nat_type_str() - returns string that represent the NAT type
+ * @nat_type: [in] NAT type
+ */
+const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type);
+
+/*
+ * ipahal_nat_entry_size() - Gets the size of HW NAT entry
+ * @nat_type: [in] The type of the NAT entry
+ * @entry_size: [out] The size of the HW NAT entry
+ */
+int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size);
+
+/*
+ * ipahal_nat_is_entry_zeroed() - Determines whether HW NAT entry is
+ *                                definitely zero
+ * @nat_type: [in] The type of the NAT entry
+ * @entry: [in] The NAT entry
+ * @entry_zeroed: [out] True if the received entry is definitely zero
+ */
+int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry,
+	bool *entry_zeroed);
+
+/*
+ * ipahal_nat_is_entry_valid() - Determines whether HW NAT entry is
+ *                                valid.
+ *  The validity criterion depends on the entry type, e.g. a NAT base
+ *   table entry must have a valid protocol and be enabled.
+ * @nat_type: [in] The type of the NAT entry
+ * @entry: [in] The NAT entry
+ * @entry_valid: [out] True if the received entry is valid
+ */
+int ipahal_nat_is_entry_valid(enum ipahal_nat_type nat_type, void *entry,
+	bool *entry_valid);
+
+/*
+ * ipahal_nat_stringify_entry() - Creates a string for HW NAT entry
+ * @nat_type: [in] The type of the NAT entry
+ * @entry: [in] The NAT entry
+ * @buff: [out] Output buffer for the result string
+ * @buff_size: [in] The size of the output buffer
+ * @return the number of characters written into buff not including
+ *         the trailing '\0'
+ */
+int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry,
+	char *buff, size_t buff_size);
+
+/*
+ * ipahal_nat_construct_entry() - Create NAT entry using the given fields
+ * @nat_type: [in] The type of the NAT entry
+ * @fields: [in] The fields to be written to the entry
+ * @address: [in] The address of the memory to be written
+ */
+int ipahal_nat_construct_entry(enum ipahal_nat_type nat_type,
+	const void *fields,
+	void *address);
+
+/*
+ * ipahal_nat_parse_entry() - Parse NAT entry to the given fields structure
+ * @nat_type: [in] The type of the NAT entry
+ * @fields: [out] The fields structure to be filled from the parsed entry
+ * @address: [in] The address of the memory to be parsed
+ */
+int ipahal_nat_parse_entry(enum ipahal_nat_type nat_type, void *fields,
+	const void *address);
+
+#endif /* _IPAHAL_NAT_H_ */

+ 146 - 0
ipa/ipa_v3/ipahal/ipahal_nat_i.h

@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_NAT_I_H_
+#define _IPAHAL_NAT_I_H_
+
+#include <linux/msm_ipa.h>
+
+/* ----------------------- IPv4 NAT Table Entry  -------------------------
+ *
+ * -----------------------------------------------------------------------
+ * |  7  |     6    |   5    |   4   |    3        | 2 |    1    |    0  |
+ * -----------------------------------------------------------------------
+ * |           Target IP(4B)         |             Private IP(4B)        |
+ * -----------------------------------------------------------------------
+ * |Target Port(2B) |Private Port(2B)| Public Port(2B) | Next Index(2B)  |
+ * -----------------------------------------------------------------------
+ * |Proto|      TimeStamp(3B)        |     Flags(2B)   |IP check sum Diff|
+ * |(1B) |                           |EN|Redirect|Resv |        (2B)     |
+ * -----------------------------------------------------------------------
+ * |TCP/UDP checksum|  PDN info(2B)  |    SW Specific Parameters(4B)     |
+ * |    diff (2B)   |Info|Resv       |index table entry|  prev index     |
+ * -----------------------------------------------------------------------
+ */
+struct ipa_nat_hw_ipv4_entry {
+	/* An IP address can't be bit-field, because its address is used */
+	u32 private_ip;
+	u32 target_ip;
+
+	u32 next_index : 16;
+	u32 public_port : 16;
+	u32 private_port : 16;
+	u32 target_port : 16;
+	u32 ip_chksum : 16;
+
+	u32 rsvd1 : 14;
+	u32 redirect : 1;
+	u32 enable : 1;
+
+	u32 time_stamp : 24;
+	u32 protocol : 8;
+
+	u32 prev_index : 16;
+	u32 indx_tbl_entry : 16;
+
+	u32 rsvd2 : 12;
+	u32 pdn_index : 4; /* IPA 4.0 and greater */
+
+	u32 tcp_udp_chksum : 16;
+};
+
+/*--- IPV4 NAT Index Table Entry --
+ *---------------------------------
+ *|   3   |   2   |   1   |   0   |
+ *---------------------------------
+ *|next index(2B) |table entry(2B)|
+ *---------------------------------
+ */
+struct ipa_nat_hw_indx_entry {
+	u16 tbl_entry;
+	u16 next_index;
+};
+
+/**
+ * struct ipa_nat_hw_pdn_entry - IPA PDN config table entry
+ * @public_ip: the PDN's public ip
+ * @src_metadata: the PDN's metadata to be replaced for source NAT
+ * @dst_metadata: the PDN's metadata to be replaced for destination NAT
+ * @resrvd: reserved field
+ * ---------------------------------
+ * |   3   |   2   |   1   |   0   |
+ * ---------------------------------
+ * |        public_ip (4B)         |
+ * ---------------------------------
+ * |      src_metadata (4B)        |
+ * ---------------------------------
+ * |      dst_metadata (4B)        |
+ * ---------------------------------
+ * |         resrvd (4B)           |
+ * ---------------------------------
+ */
+struct ipa_nat_hw_pdn_entry {
+	u32 public_ip;
+	u32 src_metadata;
+	u32 dst_metadata;
+	u32 resrvd;
+};
+
+/*-------------------------  IPV6CT Table Entry  ------------------------------
+ *-----------------------------------------------------------------------------
+ *|   7    |      6      |  5  |  4   |        3         |  2  |   1  |   0   |
+ *-----------------------------------------------------------------------------
+ *|                   Outbound Src IPv6 Address (8 LSB Bytes)                 |
+ *-----------------------------------------------------------------------------
+ *|                   Outbound Src IPv6 Address (8 MSB Bytes)                 |
+ *-----------------------------------------------------------------------------
+ *|                   Outbound Dest IPv6 Address (8 LSB Bytes)                |
+ *-----------------------------------------------------------------------------
+ *|                   Outbound Dest IPv6 Address (8 MSB Bytes)                |
+ *-----------------------------------------------------------------------------
+ *|Protocol|      TimeStamp (3B)      |       Flags (2B)       |Reserved (2B) |
+ *|  (1B)  |                          |Enable|Redirect|Resv    |              |
+ *-----------------------------------------------------------------------------
+ *|Reserved|Direction(1B)|Src Port(2B)|     Dest Port (2B)     |Next Index(2B)|
+ *|  (1B)  |IN|OUT|Resv  |            |                        |              |
+ *-----------------------------------------------------------------------------
+ *|    SW Specific Parameters(4B)     |                Reserved (4B)          |
+ *|    Prev Index (2B)   |Reserved(2B)|                                       |
+ *-----------------------------------------------------------------------------
+ *|                            Reserved (8B)                                  |
+ *-----------------------------------------------------------------------------
+ */
+struct ipa_nat_hw_ipv6ct_entry {
+	/* An IP address can't be bit-field, because its address is used */
+	u64 src_ipv6_lsb;
+	u64 src_ipv6_msb;
+	u64 dest_ipv6_lsb;
+	u64 dest_ipv6_msb;
+
+	u64 rsvd1 : 30;
+	u64 redirect : 1;
+	u64 enable : 1;
+
+	u64 time_stamp : 24;
+	u64 protocol : 8;
+
+	u64 next_index : 16;
+	u64 dest_port : 16;
+	u64 src_port : 16;
+	u64 rsvd2 : 6;
+	u64 out_allowed : 1;
+	u64 in_allowed : 1;
+	u64 rsvd3 : 8;
+
+	u64 rsvd4 : 48;
+	u64 prev_index : 16;
+
+	u64 rsvd5 : 64;
+};
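+
+/*
+ * Illustrative compile-time layout checks (a sketch, not from the
+ * original driver): the structures above are expected to match the
+ * byte-map diagrams. Assumes static_assert() from <linux/build_bug.h>
+ * is available in this kernel.
+ */
+static_assert(sizeof(struct ipa_nat_hw_ipv4_entry) == 32);
+static_assert(sizeof(struct ipa_nat_hw_indx_entry) == 4);
+static_assert(sizeof(struct ipa_nat_hw_pdn_entry) == 16);
+static_assert(sizeof(struct ipa_nat_hw_ipv6ct_entry) == 64);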
+
+int ipahal_nat_init(enum ipa_hw_type ipa_hw_type);
+
+#endif /* _IPAHAL_NAT_I_H_ */
+

+ 4011 - 0
ipa/ipa_v3/ipahal/ipahal_reg.c

@@ -0,0 +1,4011 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+	__stringify(IPA_ROUTE),
+	__stringify(IPA_IRQ_STTS_EE_n),
+	__stringify(IPA_IRQ_EN_EE_n),
+	__stringify(IPA_IRQ_CLR_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_INFO_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+	__stringify(IPA_HOLB_DROP_IRQ_INFO_EE_n),
+	__stringify(IPA_HOLB_DROP_IRQ_EN_EE_n),
+	__stringify(IPA_HOLB_DROP_IRQ_CLR_EE_n),
+	__stringify(IPA_BCR),
+	__stringify(IPA_ENABLED_PIPES),
+	__stringify(IPA_VERSION),
+	__stringify(IPA_TAG_TIMER),
+	__stringify(IPA_NAT_TIMER),
+	__stringify(IPA_COMP_HW_VERSION),
+	__stringify(IPA_COMP_CFG),
+	__stringify(IPA_STATE_TX_WRAPPER),
+	__stringify(IPA_STATE_TX1),
+	__stringify(IPA_STATE_FETCHER),
+	__stringify(IPA_STATE_FETCHER_MASK),
+	__stringify(IPA_STATE_FETCHER_MASK_0),
+	__stringify(IPA_STATE_FETCHER_MASK_1),
+	__stringify(IPA_STATE_DFETCHER),
+	__stringify(IPA_STATE_ACL),
+	__stringify(IPA_STATE),
+	__stringify(IPA_STATE_RX_ACTIVE),
+	__stringify(IPA_STATE_TX0),
+	__stringify(IPA_STATE_AGGR_ACTIVE),
+	__stringify(IPA_COUNTER_CFG),
+	__stringify(IPA_STATE_GSI_TLV),
+	__stringify(IPA_STATE_GSI_AOS),
+	__stringify(IPA_STATE_GSI_IF),
+	__stringify(IPA_STATE_GSI_SKIP),
+	__stringify(IPA_STATE_GSI_IF_CONS),
+	__stringify(IPA_STATE_DPL_FIFO),
+	__stringify(IPA_STATE_COAL_MASTER),
+	__stringify(IPA_GENERIC_RAM_ARBITER_PRIORITY),
+	__stringify(IPA_STATE_NLO_AGGR),
+	__stringify(IPA_STATE_COAL_MASTER_1),
+	__stringify(IPA_ENDP_INIT_HDR_n),
+	__stringify(IPA_ENDP_INIT_HDR_EXT_n),
+	__stringify(IPA_ENDP_INIT_AGGR_n),
+	__stringify(IPA_AGGR_FORCE_CLOSE),
+	__stringify(IPA_ENDP_INIT_ROUTE_n),
+	__stringify(IPA_ENDP_INIT_MODE_n),
+	__stringify(IPA_ENDP_INIT_NAT_n),
+	__stringify(IPA_ENDP_INIT_CONN_TRACK_n),
+	__stringify(IPA_ENDP_INIT_CTRL_n),
+	__stringify(IPA_ENDP_INIT_CTRL_SCND_n),
+	__stringify(IPA_ENDP_INIT_CTRL_STATUS_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+	__stringify(IPA_ENDP_INIT_DEAGGR_n),
+	__stringify(IPA_ENDP_INIT_SEQ_n),
+	__stringify(IPA_DEBUG_CNT_REG_n),
+	__stringify(IPA_ENDP_INIT_CFG_n),
+	__stringify(IPA_IRQ_EE_UC_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+	__stringify(IPA_ENDP_INIT_PROD_CFG_n),
+	__stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+	__stringify(IPA_SHARED_MEM_SIZE),
+	__stringify(IPA_SW_AREA_RAM_DIRECT_ACCESS_n),
+	__stringify(IPA_DEBUG_CNT_CTRL_n),
+	__stringify(IPA_UC_MAILBOX_m_n),
+	__stringify(IPA_FILT_ROUT_HASH_FLUSH),
+	__stringify(IPA_FILT_ROUT_HASH_EN),
+	__stringify(IPA_SINGLE_NDP_MODE),
+	__stringify(IPA_QCNCM),
+	__stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_ENDP_STATUS_n),
+	__stringify(IPA_ENDP_YELLOW_RED_MARKER_CFG_n),
+	__stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+	__stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+	__stringify(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT),
+	__stringify(IPA_QSB_MAX_WRITES),
+	__stringify(IPA_QSB_MAX_READS),
+	__stringify(IPA_TX_CFG),
+	__stringify(IPA_IDLE_INDICATION_CFG),
+	__stringify(IPA_DPS_SEQUENCER_FIRST),
+	__stringify(IPA_DPS_SEQUENCER_LAST),
+	__stringify(IPA_HPS_SEQUENCER_FIRST),
+	__stringify(IPA_HPS_SEQUENCER_LAST),
+	__stringify(IPA_CLKON_CFG),
+	__stringify(IPA_QTIME_TIMESTAMP_CFG),
+	__stringify(IPA_TIMERS_PULSE_GRAN_CFG),
+	__stringify(IPA_TIMERS_XO_CLK_DIV_CFG),
+	__stringify(IPA_STAT_QUOTA_BASE_n),
+	__stringify(IPA_STAT_QUOTA_MASK_n),
+	__stringify(IPA_STAT_TETHERING_BASE_n),
+	__stringify(IPA_STAT_TETHERING_MASK_n),
+	__stringify(IPA_STAT_FILTER_IPV4_BASE),
+	__stringify(IPA_STAT_FILTER_IPV6_BASE),
+	__stringify(IPA_STAT_ROUTER_IPV4_BASE),
+	__stringify(IPA_STAT_ROUTER_IPV6_BASE),
+	__stringify(IPA_STAT_FILTER_IPV4_START_ID),
+	__stringify(IPA_STAT_FILTER_IPV6_START_ID),
+	__stringify(IPA_STAT_ROUTER_IPV4_START_ID),
+	__stringify(IPA_STAT_ROUTER_IPV6_START_ID),
+	__stringify(IPA_STAT_FILTER_IPV4_END_ID),
+	__stringify(IPA_STAT_FILTER_IPV6_END_ID),
+	__stringify(IPA_STAT_ROUTER_IPV4_END_ID),
+	__stringify(IPA_STAT_ROUTER_IPV6_END_ID),
+	__stringify(IPA_STAT_DROP_CNT_BASE_n),
+	__stringify(IPA_STAT_DROP_CNT_MASK_n),
+	__stringify(IPA_SNOC_FEC_EE_n),
+	__stringify(IPA_FEC_ADDR_EE_n),
+	__stringify(IPA_FEC_ADDR_MSB_EE_n),
+	__stringify(IPA_FEC_ATTR_EE_n),
+	__stringify(IPA_ENDP_GSI_CFG1_n),
+	__stringify(IPA_ENDP_GSI_CFG_AOS_n),
+	__stringify(IPA_ENDP_GSI_CFG_TLV_n),
+	__stringify(IPA_COAL_EVICT_LRU),
+	__stringify(IPA_COAL_QMAP_CFG)
+};
+
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	IPAHAL_ERR("No construct function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN(1, "invalid register operation");
+}
+
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	IPAHAL_ERR("No parse function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN(1, "invalid register operation");
+}
+
+static void ipareg_construct_rx_hps_clients_depth1(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5,
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5,
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5,
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5,
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[4],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5,
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5);
+}
+
+static void ipareg_construct_rsrg_grp_xy(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+	/* DST_23 register has only X fields at ipa V3_5 */
+	if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+		return;
+
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+static void ipareg_construct_rsrg_grp_xy_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+	/* SRC_45 and DST_45 registers have only X fields at IPA v4_5 */
+	if (reg == IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n ||
+		reg == IPA_DST_RSRC_GRP_45_RSRC_TYPE_n)
+		return;
+
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+static void ipareg_construct_hash_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_hash_cfg_n(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	tuple->flt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->flt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->flt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	tuple->flt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->flt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->flt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->flt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined1 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	tuple->rt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->rt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->rt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	tuple->rt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->rt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->rt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->rt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined2 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_endp_status_n_common(
+	const struct ipahal_reg_ep_cfg_status *ep_status, u32 *val)
+{
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+}
+
+static void ipareg_construct_endp_status_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_ep_cfg_status *ep_status =
+		(const struct ipahal_reg_ep_cfg_status *)fields;
+
+	ipareg_construct_endp_status_n_common(ep_status, val);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+static void ipareg_construct_endp_status_n_v4_0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_ep_cfg_status *ep_status =
+		(struct ipahal_reg_ep_cfg_status *)fields;
+
+	ipareg_construct_endp_status_n_common(ep_status, val);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
+}
+
+static void ipareg_construct_endp_status_n_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_ep_cfg_status *ep_status =
+		(struct ipahal_reg_ep_cfg_status *)fields;
+
+	ipareg_construct_endp_status_n_common(ep_status, val);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
+}
+
+static void ipareg_construct_clkon_cfg_common(
+	const struct ipahal_reg_clkon_cfg *clkon_cfg, u32 *val)
+{
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_global_2x_clk,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_global,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_gsi_if,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_weight_arb,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_qmb,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ram_slaveway,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_aggr_wrapper,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_qsb2axi_cmdq_l,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_fnr,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_1,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_0,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ntf_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_h_dcph,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_d_dcph,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ack_mngr,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ctx_handler,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rsrc_mngr,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dps_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_hps_dps_cmdqs,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rx_hps_cmdqs,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dps,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_hps,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ftch_dps,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ftch_hps,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_ram_arb,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_misc,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_tx_wrapper,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_proc,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_rx,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
+static void ipareg_construct_clkon_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	ipareg_construct_clkon_cfg_common(clkon_cfg, val);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dcmp,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+}
+
+static void ipareg_construct_clkon_cfg_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	ipareg_construct_clkon_cfg_common(clkon_cfg, val);
+
+	IPA_SETFIELD_IN_REG(*val, !!clkon_cfg->open_dpl_fifo,
+			IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5,
+			IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5);
+}
+
+static void ipareg_parse_clkon_cfg_common(
+	struct ipahal_reg_clkon_cfg *clkon_cfg, u32 val)
+{
+	memset(clkon_cfg, 0, sizeof(struct ipahal_reg_clkon_cfg));
+
+	clkon_cfg->open_global_2x_clk = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	clkon_cfg->open_global = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	clkon_cfg->open_gsi_if = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	clkon_cfg->open_weight_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	clkon_cfg->open_qmb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	clkon_cfg->open_ram_slaveway = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	clkon_cfg->open_aggr_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	clkon_cfg->open_qsb2axi_cmdq_l = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	clkon_cfg->open_fnr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	clkon_cfg->open_tx_1 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	clkon_cfg->open_tx_0 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	clkon_cfg->open_ntf_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_h_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	clkon_cfg->open_d_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	clkon_cfg->open_ack_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	clkon_cfg->open_ctx_handler = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	clkon_cfg->open_rsrc_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	clkon_cfg->open_dps_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_hps_dps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	clkon_cfg->open_rx_hps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	clkon_cfg->open_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	clkon_cfg->open_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	clkon_cfg->open_ftch_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	clkon_cfg->open_ftch_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	clkon_cfg->open_ram_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	clkon_cfg->open_misc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	clkon_cfg->open_tx_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	clkon_cfg->open_proc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	clkon_cfg->open_rx = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
+static void ipareg_parse_clkon_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	ipareg_parse_clkon_cfg_common(clkon_cfg, val);
+
+	clkon_cfg->open_dcmp = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+}
+
+static void ipareg_parse_clkon_cfg_v4_5(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	ipareg_parse_clkon_cfg_common(clkon_cfg, val);
+
+	clkon_cfg->open_dpl_fifo = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5,
+			IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5);
+}
+
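+/*
+ * Build QTIME_TIMESTAMP_CFG: LSB selection for the DPL, TAG and NAT
+ * timestamps, plus the DPL select bit (0 = legacy timer, 1 = Qtime).
+ */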
+static void ipareg_construct_qtime_timestamp_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_qtime_timestamp_cfg *ts_cfg =
+		(const struct ipahal_reg_qtime_timestamp_cfg *)fields;
+
+	if (!ts_cfg->dpl_timestamp_sel &&
+		ts_cfg->dpl_timestamp_lsb) {
+		IPAHAL_ERR("non-zero DPL timestamp LSB while in legacy mode\n");
+		WARN_ON(1);
+	}
+
+	IPA_SETFIELD_IN_REG(*val,
+		ts_cfg->dpl_timestamp_lsb,
+		IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_SHFT,
+		IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_BMSK);
+	IPA_SETFIELD_IN_REG(*val,
+		ts_cfg->dpl_timestamp_sel ? 1 : 0,
+		IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_SHFT,
+		IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_BMSK);
+	IPA_SETFIELD_IN_REG(*val,
+		ts_cfg->tag_timestamp_lsb,
+		IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_SHFT,
+		IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_BMSK);
+	IPA_SETFIELD_IN_REG(*val,
+		ts_cfg->nat_timestamp_lsb,
+		IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_SHFT,
+		IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_BMSK);
+}
+
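+/*
+ * Encode a pulse-generator granularity as its 3-bit HW code; invalid
+ * values fall back to the 100 usec code.
+ */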
+static u8 ipareg_timers_pulse_gran_code(
+	enum ipa_timers_time_gran_type gran)
+{
+	switch (gran) {
+	case IPA_TIMERS_TIME_GRAN_10_USEC:		return 0;
+	case IPA_TIMERS_TIME_GRAN_20_USEC:		return 1;
+	case IPA_TIMERS_TIME_GRAN_50_USEC:		return 2;
+	case IPA_TIMERS_TIME_GRAN_100_USEC:		return 3;
+	case IPA_TIMERS_TIME_GRAN_1_MSEC:		return 4;
+	case IPA_TIMERS_TIME_GRAN_10_MSEC:		return 5;
+	case IPA_TIMERS_TIME_GRAN_100_MSEC:		return 6;
+	case IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC:	return 7;
+	default:
+		IPAHAL_ERR("Invalid granularity %d\n", gran);
+		break;
+	}
+
+	return 3;
+}
+
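+/* Decode a 3-bit HW granularity code; invalid codes fall back to 100 usec */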
+static enum ipa_timers_time_gran_type
+	ipareg_timers_pulse_gran_decode(u8 code)
+{
+	switch (code) {
+	case 0: return IPA_TIMERS_TIME_GRAN_10_USEC;
+	case 1: return IPA_TIMERS_TIME_GRAN_20_USEC;
+	case 2: return IPA_TIMERS_TIME_GRAN_50_USEC;
+	case 3: return IPA_TIMERS_TIME_GRAN_100_USEC;
+	case 4: return IPA_TIMERS_TIME_GRAN_1_MSEC;
+	case 5: return IPA_TIMERS_TIME_GRAN_10_MSEC;
+	case 6: return IPA_TIMERS_TIME_GRAN_100_MSEC;
+	case 7: return IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC;
+	default:
+		IPAHAL_ERR("Invalid coded granularity %d\n", code);
+		break;
+	}
+
+	return IPA_TIMERS_TIME_GRAN_100_USEC;
+}
+
+static void ipareg_construct_timers_pulse_gran_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_timers_pulse_gran_cfg *gran_cfg =
+		(const struct ipahal_reg_timers_pulse_gran_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		ipareg_timers_pulse_gran_code(gran_cfg->gran_0),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(0),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val,
+		ipareg_timers_pulse_gran_code(gran_cfg->gran_1),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(1),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(1));
+
+	IPA_SETFIELD_IN_REG(*val,
+		ipareg_timers_pulse_gran_code(gran_cfg->gran_2),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(2),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(2));
+}
+
+static void ipareg_parse_timers_pulse_gran_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	u8 code;
+	struct ipahal_reg_timers_pulse_gran_cfg *gran_cfg =
+		(struct ipahal_reg_timers_pulse_gran_cfg *)fields;
+
+	code = IPA_GETFIELD_FROM_REG(val,
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(0),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(0));
+	gran_cfg->gran_0 = ipareg_timers_pulse_gran_decode(code);
+
+	code = IPA_GETFIELD_FROM_REG(val,
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(1),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(1));
+	gran_cfg->gran_1 = ipareg_timers_pulse_gran_decode(code);
+
+	code = IPA_GETFIELD_FROM_REG(val,
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(2),
+		IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(2));
+	gran_cfg->gran_2 = ipareg_timers_pulse_gran_decode(code);
+}
+
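+/* Build TIMERS_XO_CLK_DIV_CFG: divider enable bit plus divider value */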
+static void ipareg_construct_timers_xo_clk_div_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	const struct ipahal_reg_timers_xo_clk_div_cfg *div_cfg =
+		(const struct ipahal_reg_timers_xo_clk_div_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		div_cfg->enable ? 1 : 0,
+		IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT,
+		IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		div_cfg->value,
+		IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT,
+		IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK);
+}
+
+static void ipareg_parse_timers_xo_clk_div_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_timers_xo_clk_div_cfg *div_cfg =
+		(struct ipahal_reg_timers_xo_clk_div_cfg *)fields;
+
+	div_cfg->enable =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT,
+		IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK);
+
+	div_cfg->value =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT,
+		IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK);
+}
+
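+/* Set the COMP_CFG fields that are common to all supported HW versions */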
+static void ipareg_construct_comp_cfg_common(
+	const struct ipahal_reg_comp_cfg *comp_cfg, u32 *val)
+{
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_qmb_select_by_address_global_en,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gsi_multi_axi_masters_dis,
+		IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gsi_snoc_cnoc_loop_protection_disable,
+		IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT,
+		IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_0_snoc_cnoc_loop_protection_disable,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_1_multi_inorder_wr_dis,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_0_multi_inorder_wr_dis,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_1_multi_inorder_rd_dis,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_0_multi_inorder_rd_dis,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gsi_multi_inorder_wr_dis,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gsi_multi_inorder_rd_dis,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_qmb_select_by_address_prod_en,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_qmb_select_by_address_cons_en,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_1_snoc_bypass_dis,
+		IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_0_snoc_bypass_dis,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gsi_snoc_bypass_dis,
+		IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK);
+}
+
+static void ipareg_construct_comp_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_construct_comp_cfg_common(comp_cfg, val);
+
+	IPA_SETFIELD_IN_REG(*val,
+		comp_cfg->ipa_atomic_fetcher_arb_lock_dis,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->enable,
+		IPA_COMP_CFG_ENABLE_SHFT,
+		IPA_COMP_CFG_ENABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_dcmp_fast_clk_en,
+		IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT,
+		IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK);
+}
+
+static void ipareg_construct_comp_cfg_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_construct_comp_cfg_common(comp_cfg, val);
+
+	IPA_SETFIELD_IN_REG(*val,
+		comp_cfg->ipa_atomic_fetcher_arb_lock_dis,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_full_flush_wait_rsc_closure_en,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5);
+}
+
+static void ipareg_construct_comp_cfg_v4_9(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_construct_comp_cfg_common(comp_cfg, val);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_0_dynamic_asize,
+		IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_SHFT_v4_9,
+		IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->gen_qmb_1_dynamic_asize,
+		IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_SHFT_v4_9,
+		IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_atomic_fetcher_arb_lock_dis,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT_v4_9,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+	!!comp_cfg->gsi_if_out_of_buf_stop_reset_mask_enable,
+	IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT_v4_9,
+	IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->genqmb_aooowr,
+		IPA_COMP_CFG_GENQMB_AOOOWR_SHFT_v4_9,
+		IPA_COMP_CFG_GENQMB_AOOOWR_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->qmb_ram_rd_cache_disable,
+		IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_SHFT_v4_9,
+		IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+		!!comp_cfg->ipa_full_flush_wait_rsc_closure_en,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_9,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_9);
+
+	IPA_SETFIELD_IN_REG(*val,
+	!!comp_cfg->ram_arb_priority_client_samp_fix_disable,
+	IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_SHFT_v4_9,
+	IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_BMSK_v4_9);
+}
+
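+/* Extract the COMP_CFG fields that are common to all supported HW versions */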
+static void ipareg_parse_comp_cfg_common(
+	struct ipahal_reg_comp_cfg *comp_cfg, u32 val)
+{
+	memset(comp_cfg, 0, sizeof(struct ipahal_reg_comp_cfg));
+
+	comp_cfg->ipa_qmb_select_by_address_global_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK);
+
+	comp_cfg->gsi_multi_axi_masters_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK);
+
+	comp_cfg->gsi_snoc_cnoc_loop_protection_disable =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT,
+		IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK);
+
+	comp_cfg->gen_qmb_0_snoc_cnoc_loop_protection_disable =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK);
+
+	comp_cfg->gen_qmb_1_multi_inorder_wr_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK);
+
+	comp_cfg->gen_qmb_0_multi_inorder_wr_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK);
+
+	comp_cfg->gen_qmb_1_multi_inorder_rd_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK);
+
+	comp_cfg->gen_qmb_0_multi_inorder_rd_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK);
+
+	comp_cfg->gsi_multi_inorder_wr_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK);
+
+	comp_cfg->gsi_multi_inorder_rd_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT,
+		IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK);
+
+	comp_cfg->ipa_qmb_select_by_address_prod_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK);
+
+	comp_cfg->ipa_qmb_select_by_address_cons_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT,
+		IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK);
+
+	comp_cfg->gen_qmb_1_snoc_bypass_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK);
+
+	comp_cfg->gen_qmb_0_snoc_bypass_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK);
+
+	comp_cfg->gsi_snoc_bypass_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT,
+		IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK);
+}
+
+static void ipareg_parse_comp_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_parse_comp_cfg_common(comp_cfg, val);
+
+	comp_cfg->ipa_atomic_fetcher_arb_lock_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK);
+
+	comp_cfg->enable =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_ENABLE_SHFT,
+		IPA_COMP_CFG_ENABLE_BMSK);
+
+	comp_cfg->ipa_dcmp_fast_clk_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT,
+		IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK);
+}
+
+static void ipareg_parse_comp_cfg_v4_5(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_parse_comp_cfg_common(comp_cfg, val);
+
+	comp_cfg->ipa_atomic_fetcher_arb_lock_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK);
+
+	comp_cfg->ipa_full_flush_wait_rsc_closure_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5);
+}
+
+static void ipareg_parse_comp_cfg_v4_9(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_comp_cfg *comp_cfg =
+		(struct ipahal_reg_comp_cfg *)fields;
+
+	ipareg_parse_comp_cfg_common(comp_cfg, val);
+
+	comp_cfg->gen_qmb_0_dynamic_asize =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_SHFT_v4_9,
+		IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_BMSK_v4_9);
+
+	comp_cfg->gen_qmb_1_dynamic_asize =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_SHFT_v4_9,
+		IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_BMSK_v4_9);
+
+	comp_cfg->ipa_atomic_fetcher_arb_lock_dis =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT_v4_9,
+		IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK_v4_9);
+
+	comp_cfg->gsi_if_out_of_buf_stop_reset_mask_enable =
+	IPA_GETFIELD_FROM_REG(val,
+	IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT_v4_9,
+	IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK_v4_9);
+
+	comp_cfg->genqmb_aooowr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_GENQMB_AOOOWR_SHFT_v4_9,
+		IPA_COMP_CFG_GENQMB_AOOOWR_BMSK_v4_9);
+
+	comp_cfg->qmb_ram_rd_cache_disable =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_SHFT_v4_9,
+		IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_BMSK_v4_9);
+
+	comp_cfg->ipa_full_flush_wait_rsc_closure_en =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_9,
+		IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_9);
+
+	comp_cfg->ram_arb_priority_client_samp_fix_disable =
+	IPA_GETFIELD_FROM_REG(val,
+	IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_SHFT_v4_9,
+	IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_BMSK_v4_9);
+}
+
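+/* Decode the STATE_TX_WRAPPER idle/empty status bits (v4.5 layout) */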
+static void ipareg_parse_state_tx_wrapper_v4_5(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK);
+
+	/* assumption: the TOGGLE_IDLE bit has its own SHFT/BMSK macros */
+	tx->ipa_prod_prod_bresp_toggle_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK);
+
+	tx->ipa_mbim_pkt_fms_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK);
+
+	tx->mbim_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK);
+
+	tx->trnseq_force_valid = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT,
+		IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK);
+
+	tx->pkt_drop_cnt_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK);
+
+	tx->nlo_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK);
+
+	tx->coal_direct_dma = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK);
+}
+
+static void ipareg_parse_state_tx_wrapper_v4_7(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_tx_wrapper *tx =
+		(struct ipahal_reg_tx_wrapper *)fields;
+
+	tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7);
+
+	tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7);
+
+	tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7);
+
+	tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7);
+
+	tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7);
+
+	tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7,
+		IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7);
+}
+
+static void ipareg_construct_qcncm(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	/* write the reserved bits back with the masks the parser uses */
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_qcncm(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+	qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_single_ndp_mode(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, mode->undefined,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_parse_single_ndp_mode(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode));
+	mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+	mode->undefined = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
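+/*
+ * Build DEBUG_CNT_CTRL_n: counter enable, counted event type
+ * (FLT/ROUT/general), producer/consumer select, source pipe and rule
+ * index. FLT types require a pipe-scoped rule index since global FLT
+ * rules are not supported.
+ */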
+static void ipareg_construct_debug_cnt_ctrl_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+		(struct ipahal_reg_debug_cnt_ctrl *)fields;
+	u8 type;
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+	switch (dbg_cnt_ctrl->type) {
+	case DBG_CNT_TYPE_IPV4_FLTR:
+		type = 0x0;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("global FLT rules are not supported\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV4_ROUT:
+		type = 0x1;
+		break;
+	case DBG_CNT_TYPE_GENERAL:
+		type = 0x2;
+		break;
+	case DBG_CNT_TYPE_IPV6_FLTR:
+		type = 0x4;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("global FLT rules are not supported\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV6_ROUT:
+		type = 0x5;
+		break;
+	default:
+		IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+			dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, type,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+			);
+	} else {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+	}
+}
+
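+/* Extract the shared memory size and base address from SHARED_MEM_SIZE */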
+static void ipareg_parse_shared_mem_size(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_shared_mem_size *smem_sz =
+		(struct ipahal_reg_shared_mem_size *)fields;
+
+	memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size));
+	smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+
+	smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v4_5(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_5,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_5);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v4_9(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_9,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_9);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata *metadata =
+		(struct ipa_ep_cfg_metadata *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata->qmap_id,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata_mask *metadata_mask =
+		(struct ipa_ep_cfg_metadata_mask *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+static void ipareg_construct_endp_init_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_cfg *cfg =
+		(struct ipa_ep_cfg_cfg *)fields;
+	u32 cs_offload_en;
+
+	switch (cfg->cs_offload_en) {
+	case IPA_DISABLE_CS_OFFLOAD:
+		cs_offload_en = 0;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_UL:
+		cs_offload_en = 1;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_DL:
+		cs_offload_en = 2;
+		break;
+	default:
+		IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+}
+
+static void ipareg_construct_endp_init_deaggr_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_deaggr *ep_deaggr =
+		(struct ipa_ep_cfg_deaggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_en_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->en,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_timer_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->scale,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2);
+	IPA_SETFIELD_IN_REG(*val, ep_holb->base_val,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2);
+}
+
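+/*
+ * v4.5 HOLB timer layout: a scaled time limit plus a 1-bit pulse
+ * generator (granularity) select.
+ */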
+static void ipareg_construct_endp_init_hol_block_timer_n_v4_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	if (ep_holb->pulse_generator != !!ep_holb->pulse_generator) {
+		IPAHAL_ERR("pulse_generator must be 0 or 1, got %d\n",
+			ep_holb->pulse_generator);
+		WARN_ON(1);
+	}
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->scaled_time,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT_V4_5,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->pulse_generator,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT_V4_5,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK_V4_5);
+}
+
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_parse_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	ep_ctrl->ipa_ep_suspend =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK) >>
+			IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT);
+
+	ep_ctrl->ipa_ep_delay =
+		((val & IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK) >>
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
+}
+
+static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	WARN_ON(ep_ctrl->ipa_ep_suspend);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_ep_cfg_ctrl_scnd *ep_ctrl_scnd =
+		(struct ipahal_ep_cfg_ctrl_scnd *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl_scnd->endp_delay,
+		IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_nat *ep_nat =
+		(struct ipa_ep_cfg_nat *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_conn_track_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_conn_track *ep_ipv6ct =
+		(struct ipa_ep_cfg_conn_track *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_ipv6ct->conn_track_en,
+		IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT,
+		IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_mode *init_mode =
+		(struct ipahal_reg_endp_init_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+		IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+		IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n_v4_5(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_mode *init_mode =
+		(struct ipahal_reg_endp_init_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+		IPA_ENDP_INIT_MODE_n_MODE_SHFT_V4_5,
+		IPA_ENDP_INIT_MODE_n_MODE_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT_V4_5,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK_V4_5);
+}
+
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_route *ep_init_rt =
+		(struct ipahal_reg_endp_init_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+}
+
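+/* Decode ENDP_INIT_AGGR_n into the EP aggregation config */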
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+	ep_aggr->aggr_en =
+		(((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT)
+			== IPA_ENABLE_AGGR);
+	ep_aggr->aggr =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT);
+	ep_aggr->aggr_byte_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT);
+	ep_aggr->aggr_time_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT);
+	ep_aggr->aggr_time_limit *= 1000; /* HW unit is msec; SW is usec */
+	ep_aggr->aggr_pkt_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT);
+	ep_aggr->aggr_sw_eof_active =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT);
+	ep_aggr->aggr_hard_byte_limit_en =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK)
+			>>
+			IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT);
+}
+
+static void ipareg_parse_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+	ep_aggr->aggr_en =
+		(((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5)
+			== IPA_ENABLE_AGGR);
+	ep_aggr->aggr =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5);
+	ep_aggr->aggr_byte_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5);
+	ep_aggr->scaled_time =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5);
+	ep_aggr->aggr_pkt_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5);
+	ep_aggr->aggr_sw_eof_active =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5);
+	ep_aggr->aggr_hard_byte_limit_en =
+		((val &
+		 IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5)
+		 >>
+		 IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5);
+	ep_aggr->pulse_generator =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5);
+}
+
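+/*
+ * Build ENDP_INIT_AGGR_n, clamping the byte and packet limits to the
+ * width of their register fields.
+ */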
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+	u32 byte_limit;
+	u32 pkt_limit;
+	u32 max_byte_limit;
+	u32 max_pkt_limit;
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+	/* make sure aggregation byte limit does not cross HW boundaries */
+	max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+	byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ?
+		max_byte_limit : ep_aggr->aggr_byte_limit;
+	IPA_SETFIELD_IN_REG(*val, byte_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+	/* convert the SW usec value to the HW msec unit */
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit / 1000,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	/* make sure aggregation pkt limit does not cross HW boundaries */
+	max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+	pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ?
+		max_pkt_limit : ep_aggr->aggr_pkt_limit;
+	IPA_SETFIELD_IN_REG(*val, pkt_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+	/* hard byte limit enable is supported on IPA v3.5.1 and above */
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+static void ipareg_construct_endp_init_aggr_n_v4_5(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+	u32 byte_limit;
+	u32 pkt_limit;
+	u32 max_byte_limit;
+	u32 max_pkt_limit;
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5);
+
+	/* make sure aggregation byte limit does not cross HW boundaries */
+	max_byte_limit = IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5 >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5;
+	byte_limit = (ep_aggr->aggr_byte_limit > max_byte_limit) ?
+		max_byte_limit : ep_aggr->aggr_byte_limit;
+	IPA_SETFIELD_IN_REG(*val, byte_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->scaled_time,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5);
+
+	/* make sure aggregation pkt limit does not cross HW boundaries */
+	max_pkt_limit = IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5 >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5;
+	pkt_limit = (ep_aggr->aggr_pkt_limit > max_pkt_limit) ?
+		max_pkt_limit : ep_aggr->aggr_pkt_limit;
+	IPA_SETFIELD_IN_REG(*val, pkt_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->pulse_generator,
+		IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5,
+		IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n_common(
+	const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 *val)
+{
+	u8 hdr_endianness;
+
+	hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, hdr_endianness,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	ipareg_construct_endp_init_hdr_ext_n_common(fields, val);
+}
+
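+/*
+ * v4.5 layout: wide header fields keep their LSBs in HDR_n while the
+ * MSBs spill into dedicated HDR_EXT_n fields.
+ */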
+static void ipareg_construct_endp_init_hdr_ext_n_v4_5(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext =
+		(const struct ipa_ep_cfg_hdr_ext *)fields;
+	u32 msb;
+
+	ipareg_construct_endp_init_hdr_ext_n_common(ep_hdr_ext, val);
+
+	msb = ep_hdr_ext->hdr_total_len_or_pad_offset >>
+		hweight_long(
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, msb,
+	 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5,
+	 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5);
+
+	if (!ep_hdr_ext->hdr) {
+		IPAHAL_ERR("no hdr info; skipping the hdr MSB fields\n");
+		return;
+	}
+
+	msb = ep_hdr_ext->hdr->hdr_ofst_pkt_size >>
+		hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5);
+	IPA_SETFIELD_IN_REG(*val, msb,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5);
+
+	msb = ep_hdr_ext->hdr->hdr_additional_const_len >>
+		hweight_long(
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5);
+	IPA_SETFIELD_IN_REG(*val, msb,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5
+		);
+}
+
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
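+/*
+ * v4.5+ HDR_n layout: each wide field keeps its LSBs in place and
+ * spills its MSBs into a dedicated *_MSB field.
+ */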
+static void ipareg_construct_endp_init_hdr_n_common(
+	struct ipa_ep_cfg_hdr *ep_hdr, u32 *val)
+{
+	u32 msb;
+
+	msb = ep_hdr->hdr_ofst_metadata >>
+		hweight_long(IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5);
+	IPA_SETFIELD_IN_REG(*val, msb,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5);
+
+	msb = ep_hdr->hdr_len >>
+		hweight_long(IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5);
+	IPA_SETFIELD_IN_REG(*val, msb,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5);
+}
+
+static void ipareg_construct_endp_init_hdr_n_v4_5(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	ipareg_construct_endp_init_hdr_n_common(ep_hdr, val);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5);
+}
+
+static void ipareg_construct_endp_init_hdr_n_v4_9(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	ipareg_construct_endp_init_hdr_n_common(ep_hdr, val);
+}
+
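+/* Build IPA_ROUTE: default route pipe, header table/offset and frag pipe */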
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_route *route;
+
+	route = (struct ipahal_reg_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, route->route_dis,
+		IPA_ROUTE_ROUTE_DIS_SHFT,
+		IPA_ROUTE_ROUTE_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_pipe,
+		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_qsb_max_writes *max_writes;
+
+	max_writes = (struct ipahal_reg_qsb_max_writes *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_writes->qmb_0_max_writes,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_writes->qmb_1_max_writes,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
+static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_qsb_max_reads *max_reads;
+
+	max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+}
+
+static void ipareg_construct_qsb_max_reads_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_qsb_max_reads *max_reads;
+
+	max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_read_beats,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_read_beats,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0);
+}
+
+static void ipareg_parse_qsb_max_reads(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_qsb_max_reads *max_reads;
+
+	max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+	max_reads->qmb_0_max_reads = IPA_GETFIELD_FROM_REG(val,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	max_reads->qmb_1_max_reads = IPA_GETFIELD_FROM_REG(val,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+	max_reads->qmb_0_max_read_beats = IPA_GETFIELD_FROM_REG(val,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0);
+	max_reads->qmb_1_max_read_beats = IPA_GETFIELD_FROM_REG(val,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0);
+}
+
+static void ipareg_parse_qsb_max_writes(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_qsb_max_writes *max_writes;
+
+	max_writes = (struct ipahal_reg_qsb_max_writes *)fields;
+
+	max_writes->qmb_0_max_writes = IPA_GETFIELD_FROM_REG(val,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+	max_writes->qmb_1_max_writes = IPA_GETFIELD_FROM_REG(val,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
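+/*
+ * Pre-v4.0 TX_CFG has a single prefetch-almost-empty field, so the
+ * parsed TX1 value mirrors TX0.
+ */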
+static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->tx0_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->tx1_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+
+	tx_cfg->tx1_prefetch_almost_empty_size =
+		tx_cfg->tx0_prefetch_almost_empty_size;
+}
+
+static void ipareg_parse_tx_cfg_v4_0(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+	tx_cfg->tx1_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+	tx_cfg->dmaw_scnd_outsd_pred_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+	tx_cfg->dmaw_scnd_outsd_pred_threshold = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+	tx_cfg->dmaw_max_beats_256_dis = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+	tx_cfg->pa_mask_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+		IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
+}
+
+static void ipareg_parse_tx_cfg_v4_5(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	ipareg_parse_tx_cfg_v4_0(reg, fields, val);
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->dual_tx_enable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5,
+		IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5);
+}
+
+static void ipareg_parse_tx_cfg_v4_9(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	ipareg_parse_tx_cfg_v4_5(reg, fields, val);
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->sspnd_pa_no_start_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_SSPND_PA_NO_START_STATE_SHFT_V4_9,
+		IPA_TX_CFG_SSPND_PA_NO_START_STATE_BMSK_V4_9);
+}
+
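+/*
+ * Pre-v4.0 TX_CFG construct: TX0 and TX1 share one prefetch-almost-empty
+ * field, so both SW values must match.
+ */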
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	if (tx_cfg->tx0_prefetch_almost_empty_size !=
+			tx_cfg->tx1_prefetch_almost_empty_size)
+		ipa_assert();
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
+static void ipareg_construct_tx_cfg_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_threshold,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_max_beats_256_dis,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_en,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->pa_mask_en,
+		IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+		IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
+}
+
+static void ipareg_construct_tx_cfg_v4_5(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	ipareg_construct_tx_cfg_v4_0(reg, fields, val);
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dual_tx_enable,
+		IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5,
+		IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5);
+}
+
+static void ipareg_construct_tx_cfg_v4_9(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	ipareg_construct_tx_cfg_v4_5(reg, fields, val);
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->sspnd_pa_no_start_state,
+		IPA_TX_CFG_SSPND_PA_NO_START_STATE_SHFT_V4_9,
+		IPA_TX_CFG_SSPND_PA_NO_START_STATE_BMSK_V4_9);
+}
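+
+/*
+ * Note on the handlers above: each vX_Y construct/parse first delegates to
+ * the previous version's handler and then handles only the fields added in
+ * that version. An illustrative call chain for writing TX_CFG on IPAv4.9
+ * (a sketch; the generic write API is defined further below):
+ *
+ *	ipahal_write_reg_n_fields(IPA_TX_CFG, 0, &tx_cfg);
+ *	  -> ipareg_construct_tx_cfg_v4_9()
+ *	       -> ipareg_construct_tx_cfg_v4_5()
+ *	            -> ipareg_construct_tx_cfg_v4_0()
+ */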
+
+static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_idle_indication_cfg *idle_indication_cfg;
+
+	idle_indication_cfg = (struct ipahal_reg_idle_indication_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->enter_idle_debounce_thresh,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->const_non_idle_enable,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5);
+}
+
+static void ipareg_construct_hps_queue_weights(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_weights *hps_weights;
+
+	hps_weights = (struct ipahal_reg_rx_hps_weights *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_0,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_1,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_2,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_3,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
+static void ipareg_parse_hps_queue_weights(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_rx_hps_weights *hps_weights =
+		(struct ipahal_reg_rx_hps_weights *)fields;
+
+	memset(hps_weights, 0, sizeof(struct ipahal_reg_rx_hps_weights));
+
+	hps_weights->hps_queue_weight_0 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+	hps_weights->hps_queue_weight_1 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+	hps_weights->hps_queue_weight_2 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+	hps_weights->hps_queue_weight_3 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
+static void ipareg_construct_counter_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_counter_cfg *counter_cfg =
+		(struct ipahal_reg_counter_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, counter_cfg->aggr_granularity,
+		IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT,
+		IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK);
+}
+
+static void ipareg_parse_counter_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_counter_cfg *counter_cfg =
+		(struct ipahal_reg_counter_cfg *)fields;
+
+	memset(counter_cfg, 0, sizeof(*counter_cfg));
+
+	counter_cfg->aggr_granularity = IPA_GETFIELD_FROM_REG(val,
+		IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT,
+		IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK);
+}
+
+static void ipareg_parse_state_coal_master(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_state_coal_master *state_coal_master =
+		(struct ipahal_reg_state_coal_master *)fields;
+
+	memset(state_coal_master, 0, sizeof(*state_coal_master));
+
+	state_coal_master->vp_timer_expired = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_SHFT,
+		IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_BMSK);
+
+	state_coal_master->lru_vp = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_LRU_VP_SHFT,
+		IPA_STATE_COAL_MASTER_LRU_VP_BMSK);
+
+	state_coal_master->init_vp_fsm_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_SHFT,
+		IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_BMSK);
+
+	state_coal_master->check_fir_fsm_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_CHECK_FIR_FSM_STATE_SHFT,
+		IPA_STATE_COAL_MASTER_CHECK_FIR_FSM_STATE_BMSK);
+
+	state_coal_master->hash_calc_fsm_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_SHFT,
+		IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_BMSK);
+
+	state_coal_master->find_open_fsm_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_SHFT,
+		IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_BMSK);
+
+	state_coal_master->main_fsm_state = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_SHFT,
+		IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_BMSK);
+
+	state_coal_master->vp_vld = IPA_GETFIELD_FROM_REG(val,
+		IPA_STATE_COAL_MASTER_VP_VLD_SHFT,
+		IPA_STATE_COAL_MASTER_VP_VLD_BMSK);
+}
+
+static void ipareg_construct_coal_evict_lru(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_coal_evict_lru *evict_lru =
+		(struct ipahal_reg_coal_evict_lru *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, evict_lru->coal_vp_lru_thrshld,
+		IPA_COAL_VP_LRU_THRSHLD_SHFT, IPA_COAL_VP_LRU_THRSHLD_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, evict_lru->coal_eviction_en,
+		IPA_COAL_EVICTION_EN_SHFT, IPA_COAL_EVICTION_EN_BMSK);
+}
+
+static void ipareg_parse_coal_evict_lru(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_coal_evict_lru *evict_lru =
+		(struct ipahal_reg_coal_evict_lru *)fields;
+
+	memset(evict_lru, 0, sizeof(*evict_lru));
+
+	evict_lru->coal_vp_lru_thrshld = IPA_GETFIELD_FROM_REG(val,
+		IPA_COAL_VP_LRU_THRSHLD_SHFT, IPA_COAL_VP_LRU_THRSHLD_BMSK);
+
+	evict_lru->coal_eviction_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_COAL_EVICTION_EN_SHFT, IPA_COAL_EVICTION_EN_BMSK);
+}
+
+static void ipareg_construct_coal_qmap_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_coal_qmap_cfg *qmap_cfg =
+		(struct ipahal_reg_coal_qmap_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qmap_cfg->mux_id_byte_sel,
+		IPA_COAL_QMAP_CFG_SHFT, IPA_COAL_QMAP_CFG_BMSK);
+}
+
+static void ipareg_parse_coal_qmap_cfg(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_coal_qmap_cfg *qmap_cfg =
+		(struct ipahal_reg_coal_qmap_cfg *)fields;
+
+	memset(qmap_cfg, 0, sizeof(*qmap_cfg));
+
+	qmap_cfg->mux_id_byte_sel = IPA_GETFIELD_FROM_REG(val,
+		IPA_COAL_QMAP_CFG_SHFT, IPA_COAL_QMAP_CFG_BMSK);
+}
+
+/*
+ * struct ipahal_reg_obj - Register H/W information for a specific IPA version
+ * @construct - CB to construct the register value from an abstracted structure
+ * @parse - CB to parse the register value into an abstracted structure
+ * @offset - register offset relative to the base address
+ * @n_ofst - sub-offset stride of an n-parameterized register
+ * @n_start - first n value used when printing an n-parameterized register
+ * @n_end - last n value used when printing an n-parameterized register
+ * @en_print - print this register when the device crashes
+ */
+struct ipahal_reg_obj {
+	void (*construct)(enum ipahal_reg_name reg, const void *fields,
+		u32 *val);
+	void (*parse)(enum ipahal_reg_name reg, void *fields,
+		u32 val);
+	u32 offset;
+	u32 n_ofst;
+	int n_start;
+	int n_end;
+	bool en_print;
+};
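+
+/*
+ * Decoding a table entry (an illustration of the fields above): the IPAv3.0
+ * entry
+ *
+ *	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+ *		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+ *		0x00000810, 0x70, 0, 0, 0},
+ *
+ * means that endpoint n's HDR register lives at offset 0x810 + 0x70 * n,
+ * is constructed by ipareg_construct_endp_init_hdr_n, has no real parse
+ * support, and is not printed on crash (en_print == 0).
+ */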
+
+/*
+ * This table contains the information for each register for IPAv3 and later:
+ * its offset and its construct/parse functions.
+ * All of the IPAv3 register information is statically defined below.
+ * If information is missing for some register on some IPA version,
+ *  the init function fills it in with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the entry's fields are 0.
+ * An offset of -1 means that the register was removed in that
+ *  specific version.
+ */
+static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_ROUTE] = {
+		ipareg_construct_route, ipareg_parse_dummy,
+		0x00000048, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003008, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000300c, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003010, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SUSPEND_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003098, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_BCR] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001D0, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000038, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000034, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_TAG_TIMER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000060, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000030, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_COMP_CFG] = {
+		ipareg_construct_comp_cfg, ipareg_parse_comp_cfg,
+		0x0000003C, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000010C, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+		0x00000810, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+		ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+		0x00000814, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
+		ipareg_construct_endp_init_aggr_n,
+		ipareg_parse_endp_init_aggr_n,
+		0x00000824, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001EC, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
+		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+		0x00000828, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+		0x00000820, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n,
+		ipareg_parse_endp_init_ctrl_n,
+		0x00000800, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_SCND_n] = {
+		ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
+		0x00000804, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+		ipareg_construct_endp_init_hol_block_en_n,
+		ipareg_parse_dummy,
+		0x0000082c, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n,
+		ipareg_parse_dummy,
+		0x00000830, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000600, 0x4, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
+		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+		0x00000808, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000301c, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+		ipareg_construct_endp_init_hdr_metadata_mask_n,
+		ipareg_parse_dummy,
+		0x00000818, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n,
+		ipareg_parse_dummy,
+		0x00000838, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
+		ipareg_construct_dummy, ipareg_parse_shared_mem_size,
+		0x00000054, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SW_AREA_RAM_DIRECT_ACCESS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00007000, 0x4, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
+		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+		0x00000640, 0x4, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00032000, 0x4, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000090, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
+		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+		0x00000068, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_QCNCM] = {
+		ipareg_construct_qcncm, ipareg_parse_qcncm,
+		0x00000064, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e0, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e8, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n, ipareg_parse_dummy,
+		0x00000840, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+		0x0000085C, 0x70, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000400, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000404, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000408, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000040C, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000500, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000504, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000508, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000050c, 0x20, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023C4, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023C8, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023CC, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023D0, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = {
+		ipareg_construct_qsb_max_writes, ipareg_parse_dummy,
+		0x00000074, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
+		0x00000078, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DPS_SEQUENCER_FIRST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0001e000, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_DPS_SEQUENCER_LAST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0001e07c, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_HPS_SEQUENCER_FIRST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0001e080, 0, 0, 0, 0},
+	[IPA_HW_v3_0][IPA_HPS_SEQUENCER_LAST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0001e26c, 0, 0, 0, 0},
+
+
+	/* IPAv3.1 */
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003030, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003034, 0x1000, 0, 0, 0},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003038, 0x1000, 0, 0, 0},
+
+
+	/* IPAv3.5 */
+	[IPA_HW_v3_5][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg, ipareg_parse_tx_cfg,
+		0x000001FC, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000400, 0x20, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000404, 0x20, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000500, 0x20, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000504, 0x20, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+		ipareg_parse_dummy,
+		0x00000838, 0x70, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023C4, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023CC, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = {
+		ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
+		0x00000220, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
+		ipareg_construct_hps_queue_weights,
+		ipareg_parse_hps_queue_weights,
+		0x000005a4, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_COUNTER_CFG] = {
+		ipareg_construct_counter_cfg, ipareg_parse_counter_cfg,
+		0x000001F0, 0, 0, 0, 0},
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002794, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000029A8, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002924, 0x4, 0, 0, 0 },
+	[IPA_HW_v3_5][IPA_HPS_SEQUENCER_LAST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0001e1fc, 0, 0, 0, 0},
+
+	/* IPAv4.0 */
+	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003030, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003034, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003038, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000300c, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_TAG_TIMER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000060, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+		0x00000800, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+		ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+		0x00000814, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_AGGR_n] = {
+		ipareg_construct_endp_init_aggr_n,
+		ipareg_parse_endp_init_aggr_n,
+		0x00000824, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
+		0x000001FC, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_DEBUG_CNT_REG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_DEBUG_CNT_CTRL_n] = {
+		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_QCNCM] = {
+		ipareg_construct_qcncm, ipareg_parse_qcncm,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_SINGLE_NDP_MODE] = {
+		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy,
+		0x00000078, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000014c, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+		0x00000810, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_ROUTE_n] = {
+		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+		0x00000820, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
+		0x00000840, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+		0x0000085C, 0x70, 0, 31, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = {
+		ipareg_construct_endp_init_conn_track_n,
+		ipareg_parse_dummy,
+		0x00000850, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_SCND_n] = {
+		ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
+		0x00000804, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+		ipareg_construct_endp_init_hol_block_en_n,
+		ipareg_parse_dummy,
+		0x0000082c, 0x70, 10, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n,
+		ipareg_parse_dummy,
+		0x00000830, 0x70, 10, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CFG_n] = {
+		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+		0x00000808, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_IRQ_EE_UC_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000301c, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+		ipareg_construct_endp_init_hdr_metadata_mask_n,
+		ipareg_parse_dummy,
+		0x00000818, 0x70, 10, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70, 0, 9, 1},
+	[IPA_HW_v4_0][IPA_CLKON_CFG] = {
+		ipareg_construct_clkon_cfg, ipareg_parse_clkon_cfg,
+		0x00000044, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_QUOTA_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000700, 0x4, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_QUOTA_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000708, 0x4, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_TETHERING_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000710, 0x4, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_TETHERING_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000718, 0x4, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000720, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000724, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000728, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000072C, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000730, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000734, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000738, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000073C, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000740, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000744, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000748, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000074C, 0, 0, 0, 0},
+	[IPA_HW_v4_0][IPA_STAT_DROP_CNT_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000750, 0x4, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STAT_DROP_CNT_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000758, 0x4, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000090, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_TX1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000094, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_FETCHER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000098, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_FETCHER_MASK] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000009C, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_DFETCHER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000A0, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_ACL] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000A4, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000A8, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_RX_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000AC, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_TX0] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000B0, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_AGGR_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000B4, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_GSI_TLV] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000B8, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_GSI_AOS] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000BC, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_GSI_IF] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000C0, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_STATE_GSI_SKIP] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000C4, 0, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_SNOC_FEC_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003018, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_FEC_ADDR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003020, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_FEC_ADDR_MSB_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003024, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_FEC_ATTR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003028, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000303C, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003040, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_HOLB_DROP_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003044, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_STATUS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000864, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_PROD_CFG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000CC8, 0x70, 10, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+		ipareg_parse_dummy,
+		0x00000838, 0x70, 0, 22, 1},
+	[IPA_HW_v4_0][IPA_ENDP_YELLOW_RED_MARKER_CFG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000CC0, 0x70, 10, 22, 1},
+
+	/* IPAv4.2 */
+	[IPA_HW_v4_2][IPA_IDLE_INDICATION_CFG] = {
+		ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
+		0x00000240, 0, 0, 0, 0},
+	[IPA_HW_v4_2][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n_v4_2,
+		ipareg_parse_dummy,
+		0x00000830, 0x70, 8, 16, 1},
+	[IPA_HW_v4_2][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_2][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_2][IPA_FILT_ROUT_HASH_EN] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000148, 0, 0, 0, 0},
+
+	/* IPAv4.5 */
+	[IPA_HW_v4_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000400, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000404, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000408, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000500, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000504, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy,
+		0x00000508, 0x20, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v4_5,
+		ipareg_parse_dummy,
+		0x000023c4, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v4_5,
+		ipareg_parse_dummy,
+		0x000023cc, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_BCR] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_COMP_CFG] = {
+		ipareg_construct_comp_cfg_v4_5, ipareg_parse_comp_cfg_v4_5,
+		0x0000003C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_5,
+		0x00000090, 0, 0, 0, 1 },
+	[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK_0] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000009C, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000CC, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_COUNTER_CFG] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STATE_GSI_IF_CONS] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000C8, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STATE_DPL_FIFO] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000D0, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STATE_COAL_MASTER] = {
+		ipareg_construct_dummy, ipareg_parse_state_coal_master,
+		0x000000D4, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_GENERIC_RAM_ARBITER_PRIORITY] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000D8, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STATE_NLO_AGGR] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000DC, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STATE_COAL_MASTER_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000E0, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_ENDP_YELLOW_RED_MARKER_CFG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000860, 0x70, 13, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n_v4_5, ipareg_parse_dummy,
+		0x00000820, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg_v4_5, ipareg_parse_tx_cfg_v4_5,
+		0x000001FC, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_CLKON_CFG] = {
+		ipareg_construct_clkon_cfg_v4_5, ipareg_parse_clkon_cfg_v4_5,
+		0x00000044, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_QTIME_TIMESTAMP_CFG] = {
+		ipareg_construct_qtime_timestamp_cfg, ipareg_parse_dummy,
+		0x0000024c, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_TIMERS_PULSE_GRAN_CFG] = {
+		ipareg_construct_timers_pulse_gran_cfg,
+		ipareg_parse_timers_pulse_gran_cfg,
+		0x00000254, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_TIMERS_XO_CLK_DIV_CFG] = {
+		ipareg_construct_timers_xo_clk_div_cfg,
+		ipareg_parse_timers_xo_clk_div_cfg,
+		0x00000250, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STAT_QUOTA_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000700, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_QUOTA_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000708, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_TETHERING_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000710, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_TETHERING_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000718, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000720, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000724, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000728, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000072C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_DROP_CNT_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000750, 0x4, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_STAT_DROP_CNT_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000758, 0x4, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_CFG_n] = {
+		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+		0x00000808, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+		0x00000800, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_SCND_n] = {
+		ipareg_construct_endp_init_ctrl_scnd_n, ipareg_parse_dummy,
+		0x00000804, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_CTRL_STATUS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000864, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_PROD_CFG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000CC8, 0x70, 13, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+		0x0000085C, 0x70, 0, 31, 1},
+	[IPA_HW_v4_5][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n_v4_5, ipareg_parse_dummy,
+		0x00000840, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_CONN_TRACK_n] = {
+		ipareg_construct_endp_init_conn_track_n,
+		ipareg_parse_dummy,
+		0x00000850, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v4_5,
+		ipareg_parse_dummy,
+		0x00000838, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_FILTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_STAT_ROUTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_DPS_SEQUENCER_FIRST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002570, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_DPS_SEQUENCER_LAST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002574, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_HPS_SEQUENCER_FIRST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002578, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_HPS_SEQUENCER_LAST] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000257c, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_NAT_TIMER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000058, 0, 0, 0, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+		ipareg_construct_endp_init_hol_block_en_n,
+		ipareg_parse_dummy,
+		0x0000082c, 0x70, 13, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n_v4_5,
+		ipareg_parse_dummy,
+		0x00000830, 0x70, 13, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_AGGR_n] = {
+		ipareg_construct_endp_init_aggr_n_v4_5,
+		ipareg_parse_endp_init_aggr_n_v4_5,
+		0x00000824, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_SW_AREA_RAM_DIRECT_ACCESS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00010000, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n_v4_5, ipareg_parse_dummy,
+		0x00000810, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HDR_EXT_n] = {
+		ipareg_construct_endp_init_hdr_ext_n_v4_5, ipareg_parse_dummy,
+		0x00000814, 0x70, 0, 30, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70, 0, 12, 1},
+	[IPA_HW_v4_5][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+		ipareg_construct_endp_init_hdr_metadata_mask_n,
+		ipareg_parse_dummy,
+		0x00000818, 0x70, 13, 30, 1},
+	[IPA_HW_v4_5][IPA_UC_MAILBOX_m_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00082000, 0x4, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_COAL_EVICT_LRU] = {
+		ipareg_construct_coal_evict_lru, ipareg_parse_coal_evict_lru,
+		0x0000180C, 0, 0, 0, 0},
+	[IPA_HW_v4_5][IPA_COAL_QMAP_CFG] = {
+		ipareg_construct_coal_qmap_cfg, ipareg_parse_coal_qmap_cfg,
+		0x00001810, 0, 0, 0, 0},
+
+	/* IPAv4.7 */
+	[IPA_HW_v4_7][IPA_STATE_TX_WRAPPER] = {
+		ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_7,
+		0x00000090, 0, 0, 0, 1 },
+
+	/* IPAv4.9 */
+
+	/* IPA_DEBUG */
+	[IPA_HW_v4_9][IPA_ENDP_GSI_CFG1_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000026C0, 0x4, 0, 30, 0 },
+	[IPA_HW_v4_9][IPA_ENDP_GSI_CFG_TLV_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002758, 0x4, 0, 30, 0 },
+	[IPA_HW_v4_9][IPA_ENDP_GSI_CFG_AOS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000027D4, 0x4, 0, 30, 0 },
+
+
+	/* IPA_CFG */
+	[IPA_HW_v4_9][IPA_COMP_CFG] = {
+		ipareg_construct_comp_cfg_v4_9, ipareg_parse_comp_cfg_v4_9,
+		0x0000003C, 0, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_qsb_max_reads,
+		0x00000078, 0, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_QSB_MAX_WRITES] = {
+		ipareg_construct_qsb_max_writes, ipareg_parse_qsb_max_writes,
+		0x00000074, 0, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg_v4_9, ipareg_parse_tx_cfg_v4_9,
+		0x000001FC, 0, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70, 0, 10, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n_v4_9, ipareg_parse_dummy,
+		0x00000810, 0x70, 0, 30, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70, 0, 10, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n_v4_5, ipareg_parse_dummy,
+		0x00000820, 0x70, 0, 10, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70, 0, 10, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v4_9,
+		ipareg_parse_dummy,
+		0x00000838, 0x70, 0, 30, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70, 0, 10, 0},
+	[IPA_HW_v4_9][IPA_ENDP_INIT_CONN_TRACK_n] = {
+		ipareg_construct_endp_init_conn_track_n,
+		ipareg_parse_dummy,
+		0x00000850, 0x70, 0, 10, 0},
+
+	/* IPA_EE */
+	[IPA_HW_v4_9][IPA_IRQ_STTS_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004008, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000400c, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004010, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_SNOC_FEC_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004018, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_SUSPEND_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004030, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_SUSPEND_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004034, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004038, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_HOLB_DROP_IRQ_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000403C, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_HOLB_DROP_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004040, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_HOLB_DROP_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004044, 0x1000, 0, 0, 0},
+	[IPA_HW_v4_9][IPA_IRQ_EE_UC_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000401c, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_9][IPA_FEC_ADDR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004020, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_9][IPA_FEC_ADDR_MSB_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004024, 0x1000, 0, 0, 1},
+	[IPA_HW_v4_9][IPA_FEC_ATTR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00004028, 0x1000, 0, 0, 1},
+};
+
+/*
+ * ipahal_print_all_regs() - Read and print all the valid registers.
+ *  Parameterized registers are printed for all of their valid n ranges.
+ *  Output goes to dmesg and/or the IPC logs, per the print_to_dmesg flag.
+ */
+void ipahal_print_all_regs(bool print_to_dmesg)
+{
+	int i, j;
+	struct ipahal_reg_obj *reg;
+
+	IPAHAL_DBG("Printing all registers for ipa_hw_type %d\n",
+		ipahal_ctx->hw_type);
+
+	if ((ipahal_ctx->hw_type < IPA_HW_v4_0) ||
+		(ipahal_ctx->hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipahal_ctx->hw_type);
+		return;
+	}
+
+	for (i = 0; i < IPA_REG_MAX ; i++) {
+		reg = &(ipahal_reg_objs[ipahal_ctx->hw_type][i]);
+
+		/* skip obsolete registers */
+		if (reg->offset == -1)
+			continue;
+
+		if (!reg->en_print)
+			continue;
+
+		j = reg->n_start;
+
+		if (j == reg->n_end && (reg->n_ofst == 0)) {
+			if (print_to_dmesg)
+				IPAHAL_DBG_REG("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+			else
+				IPAHAL_DBG_REG_IPC_ONLY("%s=0x%x\n",
+					ipahal_reg_name_str(i),
+					ipahal_read_reg_n(i, j));
+		} else {
+			for (; j <= reg->n_end; j++) {
+				if (print_to_dmesg)
+					IPAHAL_DBG_REG("%s_%u=0x%x\n",
+						ipahal_reg_name_str(i),
+						j, ipahal_read_reg_n(i, j));
+				else
+					IPAHAL_DBG_REG_IPC_ONLY("%s_%u=0x%x\n",
+						ipahal_reg_name_str(i),
+						j, ipahal_read_reg_n(i, j));
+			}
+		}
+	}
+}
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ *  See ipahal_reg_objs[][] comments
+ *
+ * Note: since global variables are zero-initialized, any register entry
+ *  that was not explicitly overridden is all-zero; that is how such
+ *  entries are recognized.
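+ *  For example, IPA_TX_CFG is explicitly defined for v3.5, v4.0, v4.5
+ *  and v4.9; the versions in between inherit the closest earlier entry.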
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_reg_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_REG_MAX ; j++) {
+			if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_reg_obj))) {
+				memcpy(&ipahal_reg_objs[i+1][j],
+					&ipahal_reg_objs[i][j],
+					sizeof(struct ipahal_reg_obj));
+			} else {
+				/*
+				 * explicitly overridden register.
+				 * Check validity
+				 */
+				if (!ipahal_reg_objs[i+1][j].offset) {
+					IPAHAL_ERR(
+					  "reg=%s with zero offset ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].parse) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL parse func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_reg_name_str() - returns string that represent the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+	if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+		IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+		return "Invalid Register";
+	}
+
+	return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Read the value of an n-parameterized register
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u\n",
+		ipahal_reg_name_str(reg), n);
+
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EPERM;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_read_reg_mn() - Read the value of an m/n-parameterized register
+ */
+u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPAHAL_DBG_LOW("read %s m=%u n=%u\n",
+		ipahal_reg_name_str(reg), m, n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON_ONCE(1);
+		return -EPERM;
+	}
+	/*
+	 * Currently there is only one register with both m and n
+	 *	parameters: IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future, the m stride
+	 *	can be moved into the table above.
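+	 * For example, on IPAv3.0 the mailbox offset resolves to
+	 *	0x32000 + 0x80 * m + 0x4 * n.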
+	 */
+	offset += 0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write a raw value to an m/n-parameterized register
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n",
+		ipahal_reg_name_str(reg), m, n, val);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	/*
+	 * Currently there is only one register with both m and n
+	 *	parameters: IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future, the m stride
+	 *	can be moved into the table above.
+	 */
+	offset += 0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u and parse it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EPERM;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	val = ioread32(ipahal_ctx->base + offset);
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+	return val;
+}
+
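+/*
+ * Illustrative use of the fields API (a sketch, not part of this driver):
+ *
+ *	struct ipahal_reg_tx_cfg tx_cfg;
+ *
+ *	ipahal_read_reg_n_fields(IPA_TX_CFG, 0, &tx_cfg);
+ *	tx_cfg.dual_tx_enable = 1;
+ *	ipahal_write_reg_n_fields(IPA_TX_CFG, 0, &tx_cfg);
+ *
+ * For a register that is not n-parameterized (n_ofst == 0), n is simply 0.
+ * Note that dual_tx_enable only exists from IPAv4.5 onward.
+ */
+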
+/*
+ * ipahal_write_reg_n_fields() - Construct a value from parsed fields and
+ *  write it to an n-parameterized register
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+		const void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields=%pK\n", fields);
+		WARN_ON(1);
+		return;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+	iowrite32(val, ipahal_ctx->base + offset);
+}
+
+/*
+ * Get the offset of an m/n-parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n",
+		ipahal_reg_name_str(reg), m, n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EPERM;
+	}
+	/*
+	 * Currently there is only one register with both m and n
+	 *	parameters: IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future, the m stride
+	 *	can be moved into the table above.
+	 */
+	offset += 0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+	return offset;
+}
+
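+/*
+ * ipahal_get_reg_base() - Base offset of the IPA register block. Callers
+ *  are expected to add this to the IPA device base address.
+ */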
+u32 ipahal_get_reg_base(void)
+{
+	return 0x00040000;
+}
+
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ *  that cannot be expressed via the generic functions.
+ * E.g. to disable aggregation, specific bits of the AGGR register must be
+ *  written while the other bits are left untouched. This operation is very
+ *  specific and cannot be generically defined, so for such operations we
+ *  define these dedicated functions.
+ */
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask)
+{
+	u32 shft = 0;
+	u32 bmsk = 0;
+
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+		bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v3_5_1) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v4_1) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v4_2) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_2;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_2;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v4_5) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5;
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v4_7) {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_7;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_7;
+	}
+
+	if (ep_idx > (sizeof(valmask->val) * 8 - 1)) {
+		IPAHAL_ERR("too big ep_idx %d\n", ep_idx);
+		ipa_assert();
+		return;
+	}
+	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+	valmask->mask = bmsk;
+}
+
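+/*
+ * Illustrative caller (a sketch, assuming the force-close value is written
+ * via the generic m/n API above):
+ *
+ *	struct ipahal_reg_valmask valmask;
+ *
+ *	ipahal_get_aggr_force_close_valmask(3, &valmask);
+ *	ipahal_write_reg_mn(IPA_AGGR_FORCE_CLOSE, 0, 0, valmask.val);
+ *
+ * With m == n == 0 this writes the register at its base offset, since
+ * IPA_AGGR_FORCE_CLOSE has n_ofst == 0 in the table above.
+ */
+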
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!flush || !valmask) {
+		IPAHAL_ERR("Input error: flush=%pK ; valmask=%pK\n",
+			flush, valmask);
+		return;
+	}
+
+	memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+	if (flush->v6_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+	if (flush->v6_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+	if (flush->v4_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+	if (flush->v4_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+	valmask->mask = valmask->val;
+}

+ 825 - 0
ipa/ipa_v3/ipahal/ipahal_reg.h

@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
+/*
+ * Register names
+ *
+ * NOTE: Any change to this enum must be reflected in the
+ *	ipareg_name_to_str array as well.
+ */
+enum ipahal_reg_name {
+	IPA_ROUTE,
+	IPA_IRQ_STTS_EE_n,
+	IPA_IRQ_EN_EE_n,
+	IPA_IRQ_CLR_EE_n,
+	IPA_SUSPEND_IRQ_INFO_EE_n,
+	IPA_SUSPEND_IRQ_EN_EE_n,
+	IPA_SUSPEND_IRQ_CLR_EE_n,
+	IPA_HOLB_DROP_IRQ_INFO_EE_n,
+	IPA_HOLB_DROP_IRQ_EN_EE_n,
+	IPA_HOLB_DROP_IRQ_CLR_EE_n,
+	IPA_BCR,
+	IPA_ENABLED_PIPES,
+	IPA_VERSION,
+	IPA_TAG_TIMER,
+	IPA_NAT_TIMER,
+	IPA_COMP_HW_VERSION,
+	IPA_COMP_CFG,
+	IPA_STATE_TX_WRAPPER,
+	IPA_STATE_TX1,
+	IPA_STATE_FETCHER,
+	IPA_STATE_FETCHER_MASK,
+	IPA_STATE_FETCHER_MASK_0,
+	IPA_STATE_FETCHER_MASK_1,
+	IPA_STATE_DFETCHER,
+	IPA_STATE_ACL,
+	IPA_STATE,
+	IPA_STATE_RX_ACTIVE,
+	IPA_STATE_TX0,
+	IPA_STATE_AGGR_ACTIVE,
+	IPA_COUNTER_CFG,
+	IPA_STATE_GSI_TLV,
+	IPA_STATE_GSI_AOS,
+	IPA_STATE_GSI_IF,
+	IPA_STATE_GSI_SKIP,
+	IPA_STATE_GSI_IF_CONS,
+	IPA_STATE_DPL_FIFO,
+	IPA_STATE_COAL_MASTER,
+	IPA_GENERIC_RAM_ARBITER_PRIORITY,
+	IPA_STATE_NLO_AGGR,
+	IPA_STATE_COAL_MASTER_1,
+	IPA_ENDP_INIT_HDR_n,
+	IPA_ENDP_INIT_HDR_EXT_n,
+	IPA_ENDP_INIT_AGGR_n,
+	IPA_AGGR_FORCE_CLOSE,
+	IPA_ENDP_INIT_ROUTE_n,
+	IPA_ENDP_INIT_MODE_n,
+	IPA_ENDP_INIT_NAT_n,
+	IPA_ENDP_INIT_CONN_TRACK_n,
+	IPA_ENDP_INIT_CTRL_n,
+	IPA_ENDP_INIT_CTRL_SCND_n,
+	IPA_ENDP_INIT_CTRL_STATUS_n,
+	IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+	IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+	IPA_ENDP_INIT_DEAGGR_n,
+	IPA_ENDP_INIT_SEQ_n,
+	IPA_DEBUG_CNT_REG_n,
+	IPA_ENDP_INIT_CFG_n,
+	IPA_IRQ_EE_UC_n,
+	IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+	IPA_ENDP_INIT_HDR_METADATA_n,
+	IPA_ENDP_INIT_PROD_CFG_n,
+	IPA_ENDP_INIT_RSRC_GRP_n,
+	IPA_SHARED_MEM_SIZE,
+	IPA_SW_AREA_RAM_DIRECT_ACCESS_n,
+	IPA_DEBUG_CNT_CTRL_n,
+	IPA_UC_MAILBOX_m_n,
+	IPA_FILT_ROUT_HASH_FLUSH,
+	IPA_FILT_ROUT_HASH_EN,
+	IPA_SINGLE_NDP_MODE,
+	IPA_QCNCM,
+	IPA_SYS_PKT_PROC_CNTXT_BASE,
+	IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+	IPA_ENDP_STATUS_n,
+	IPA_ENDP_YELLOW_RED_MARKER_CFG_n,
+	IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+	IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+	IPA_HPS_FTCH_ARB_QUEUE_WEIGHT,
+	IPA_QSB_MAX_WRITES,
+	IPA_QSB_MAX_READS,
+	IPA_TX_CFG,
+	IPA_IDLE_INDICATION_CFG,
+	IPA_DPS_SEQUENCER_FIRST,
+	IPA_DPS_SEQUENCER_LAST,
+	IPA_HPS_SEQUENCER_FIRST,
+	IPA_HPS_SEQUENCER_LAST,
+	IPA_CLKON_CFG,
+	IPA_QTIME_TIMESTAMP_CFG,
+	IPA_TIMERS_PULSE_GRAN_CFG,
+	IPA_TIMERS_XO_CLK_DIV_CFG,
+	IPA_STAT_QUOTA_BASE_n,
+	IPA_STAT_QUOTA_MASK_n,
+	IPA_STAT_TETHERING_BASE_n,
+	IPA_STAT_TETHERING_MASK_n,
+	IPA_STAT_FILTER_IPV4_BASE,
+	IPA_STAT_FILTER_IPV6_BASE,
+	IPA_STAT_ROUTER_IPV4_BASE,
+	IPA_STAT_ROUTER_IPV6_BASE,
+	IPA_STAT_FILTER_IPV4_START_ID,
+	IPA_STAT_FILTER_IPV6_START_ID,
+	IPA_STAT_ROUTER_IPV4_START_ID,
+	IPA_STAT_ROUTER_IPV6_START_ID,
+	IPA_STAT_FILTER_IPV4_END_ID,
+	IPA_STAT_FILTER_IPV6_END_ID,
+	IPA_STAT_ROUTER_IPV4_END_ID,
+	IPA_STAT_ROUTER_IPV6_END_ID,
+	IPA_STAT_DROP_CNT_BASE_n,
+	IPA_STAT_DROP_CNT_MASK_n,
+	IPA_SNOC_FEC_EE_n,
+	IPA_FEC_ADDR_EE_n,
+	IPA_FEC_ADDR_MSB_EE_n,
+	IPA_FEC_ATTR_EE_n,
+	IPA_ENDP_GSI_CFG1_n,
+	IPA_ENDP_GSI_CFG_AOS_n,
+	IPA_ENDP_GSI_CFG_TLV_n,
+	IPA_COAL_EVICT_LRU,
+	IPA_COAL_QMAP_CFG,
+	IPA_REG_MAX,
+};
+
+/*
+ * struct ipahal_reg_route - IPA route register
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ *    packets and frag new rule statuses, if source pipe does not have
+ *    a notification status pipe defined.
+ * @route_def_retain_hdr: default value of retain header. It is used
+ *    when no rule was hit
+ */
+struct ipahal_reg_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+	u8  route_frag_def_pipe;
+	u32 route_def_retain_hdr;
+};
+
+/*
+ * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
+ * @route_table_index: Default index of routing table (IPA Consumer).
+ */
+struct ipahal_reg_endp_init_route {
+	u32 route_table_index;
+};
+
+/*
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
+ * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
+ *	index is for source-resource-group. If destination ENDP, index is
+ *	for destination-resource-group.
+ */
+struct ipahal_reg_endp_init_rsrc_grp {
+	u32 rsrc_grp;
+};
+
+/*
+ * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
+ * @dst_pipe_number: This parameter specifies the destination output pipe
+ *	packets will be routed to. Valid for DMA mode only and for Input
+ *	Pipes only (IPA Consumer)
+ */
+struct ipahal_reg_endp_init_mode {
+	u32 dst_pipe_number;
+	struct ipa_ep_cfg_mode ep_mode;
+};
+
+/*
+ * struct ipahal_reg_shared_mem_size - IPA_SHARED_MEM_SIZE register
+ * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
+ *	IPA shared memory.
+ * @shared_mem_baddr: Offset of SW partition within IPA
+ *	shared memory [in 8Bytes]. To get absolute address of SW partition,
+ *	add this offset to IPA_SW_AREA_RAM_DIRECT_ACCESS_n baddr.
+ */
+struct ipahal_reg_shared_mem_size {
+	u32 shared_mem_sz;
+	u32 shared_mem_baddr;
+};
+
+/*
+ * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ *	set this bit in order to enable Statuses. Output Pipe - send
+ *	Status indications only if bit is set. Input Pipe - forward Status
+ *	indication to STATUS_ENDP only if bit is set. Valid for Input
+ *	and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ *	specified Status End Point. Status endpoint needs to be
+ *	configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ *	Consumer)
+ * @status_location: Location of PKT-STATUS on destination pipe.
+ *	If set to 0 (default), PKT-STATUS will be appended before the packet
+ *	for this endpoint. If set to 1, PKT-STATUS will be appended after the
+ *	packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ * @status_pkt_suppress: Disable notification status when statistics are enabled
+ */
+struct ipahal_reg_ep_cfg_status {
+	bool status_en;
+	u8 status_ep;
+	bool status_location;
+	u8 status_pkt_suppress;
+};
+
+/*
+ * struct ipahal_reg_clkon_cfg - Enables SW bypass clock-gating for the IPA core
+ *
+ * @all: Enables SW bypass clock-gating controls for this sub-module;
+ *	0: CGC is enabled by internal logic, 1: No CGC (clk is always 'ON').
+ *	The affected sub-module is named by the field -> e.g. open_rx refers
+ *	to the IPA_RX sub-module and open_global refers to the global IPA 1x clock
+ */
+struct ipahal_reg_clkon_cfg {
+	bool open_dpl_fifo;
+	bool open_global_2x_clk;
+	bool open_global;
+	bool open_gsi_if;
+	bool open_weight_arb;
+	bool open_qmb;
+	bool open_ram_slaveway;
+	bool open_aggr_wrapper;
+	bool open_qsb2axi_cmdq_l;
+	bool open_fnr;
+	bool open_tx_1;
+	bool open_tx_0;
+	bool open_ntf_tx_cmdqs;
+	bool open_dcmp;
+	bool open_h_dcph;
+	bool open_d_dcph;
+	bool open_ack_mngr;
+	bool open_ctx_handler;
+	bool open_rsrc_mngr;
+	bool open_dps_tx_cmdqs;
+	bool open_hps_dps_cmdqs;
+	bool open_rx_hps_cmdqs;
+	bool open_dps;
+	bool open_hps;
+	bool open_ftch_dps;
+	bool open_ftch_hps;
+	bool open_ram_arb;
+	bool open_misc;
+	bool open_tx_wrapper;
+	bool open_proc;
+	bool open_rx;
+};
+
+/*
+ * struct ipahal_reg_qtime_timestamp_cfg - IPA timestamp configuration
+ *  Relevant starting IPA 4.5.
+ *  IPA timestamps are based on QTIMER which is 56bit length which is
+ *  based on XO clk running at 19.2MHz (52nsec resolution).
+ *  Specific timestamps (TAG, NAT, DPL) may require lower resolution.
+ *  This can be achieved by omitting LSB bits from the 56bit QTIMER.
+ *  e.g. if we omit (shift) 24 bits then we get (2^24)*(52ns)=0.87sec resolution.
+ *
+ * @dpl_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of
+ *  DPL timestamp.
+ * @dpl_timestamp_sel: if false, DPL timestamp will be based on legacy
+ *  DPL_TIMER which counts in 1ms. if true, it will be based on QTIME
+ *  value shifted by dpl_timestamp_lsb.
+ * @tag_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of
+ *  TAG timestamp.
+ * @nat_timestamp_lsb: Shifting Qtime value. Value will be used as LSB of
+ *  NAT timestamp.
+ */
+struct ipahal_reg_qtime_timestamp_cfg {
+	u32 dpl_timestamp_lsb;
+	bool dpl_timestamp_sel;
+	u32 tag_timestamp_lsb;
+	u32 nat_timestamp_lsb;
+};
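
To make the shift-to-resolution math above concrete, a small illustrative helper (not part of this header) computing the effective tick after dropping lsb bits:

	/* Sketch: effective QTIMER tick after dropping 'lsb' bits.
	 * Base tick is ~52ns (19.2MHz XO); e.g. lsb=24 -> 52ns * 2^24 ~= 0.87s.
	 */
	static inline u64 qtime_resolution_ns(u32 lsb)
	{
		return 52ULL << lsb;
	}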
+
+/*
+ * enum ipa_timers_time_gran_type - Time granularity to be used with timers
+ *
+ * e.g. for HOLB and Aggregation timers
+ */
+enum ipa_timers_time_gran_type {
+	IPA_TIMERS_TIME_GRAN_10_USEC,
+	IPA_TIMERS_TIME_GRAN_20_USEC,
+	IPA_TIMERS_TIME_GRAN_50_USEC,
+	IPA_TIMERS_TIME_GRAN_100_USEC,
+	IPA_TIMERS_TIME_GRAN_1_MSEC,
+	IPA_TIMERS_TIME_GRAN_10_MSEC,
+	IPA_TIMERS_TIME_GRAN_100_MSEC,
+	IPA_TIMERS_TIME_GRAN_NEAR_HALF_SEC, /* 0.65536s */
+	IPA_TIMERS_TIME_GRAN_MAX,
+};
+
+/*
+ * struct ipahal_reg_timers_pulse_gran_cfg - Counters tick granularities
+ *  Relevant starting IPA 4.5.
+ *  IPA timers are based on XO CLK running at 19.2MHz (52ns resolution) divided
+ *  by a clock divider (see IPA_TIMERS_XO_CLK_DIV_CFG) - default 100kHz (10usec).
+ *  IPA timers instances (e.g. HOLB or AGGR) may require different resolutions.
+ *  There are 3 global pulse generators with configurable granularity. Each
+ *  timer instance can choose one of the three generators to work with.
+ *  Each generator granularity can be one of supported ones.
+ *
+ * @gran_X: granularity tick of counterX
+ */
+struct ipahal_reg_timers_pulse_gran_cfg {
+	enum ipa_timers_time_gran_type gran_0;
+	enum ipa_timers_time_gran_type gran_1;
+	enum ipa_timers_time_gran_type gran_2;
+};
+
+/*
+ * struct ipahal_reg_timers_xo_clk_div_cfg - IPA timers clock divider
+ * Used to control clock divider which gets XO_CLK of 19.2MHz as input.
+ * Output of CDIV is used to generate IPA timers granularity
+ *
+ * @enable: Enable the clock divider for all IPA and GSI timers.
+ *  The clock is disabled by default, and needs to be enabled when system is up.
+ * @value: Divider value to be used by CDIV. POR value is set to 191
+ *  to generate a 100kHz clk based on XO_CLK.
+ *  Values of ipahal_reg_timers_pulse_gran_cfg are based on this default.
+ */
+struct ipahal_reg_timers_xo_clk_div_cfg {
+	bool enable;
+	u32 value;
+};
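
A hedged configuration sketch tying the two structures above together via the generic fields API declared later in this header; the granularity assignments are illustrative:

	/* Sketch: 19.2MHz / (191 + 1) = 100kHz, i.e. a 10usec base tick. */
	struct ipahal_reg_timers_xo_clk_div_cfg div_cfg = {
		.enable = true,
		.value = 191,	/* POR divider value */
	};
	struct ipahal_reg_timers_pulse_gran_cfg gran_cfg = {
		.gran_0 = IPA_TIMERS_TIME_GRAN_10_USEC,
		.gran_1 = IPA_TIMERS_TIME_GRAN_1_MSEC,
		.gran_2 = IPA_TIMERS_TIME_GRAN_10_MSEC,
	};

	ipahal_write_reg_fields(IPA_TIMERS_XO_CLK_DIV_CFG, &div_cfg);
	ipahal_write_reg_fields(IPA_TIMERS_PULSE_GRAN_CFG, &gran_cfg);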
+
+/*
+ * struct ipahal_reg_comp_cfg- IPA Core QMB/Master Port selection
+ *
+ * @enable / @ipa_dcmp_fast_clk_en: are not relevant starting IPA4.5
+ * @ipa_full_flush_wait_rsc_closure_en: relevant starting IPA4.5
+ */
+struct ipahal_reg_comp_cfg {
+	bool gen_qmb_0_dynamic_asize;
+	bool gen_qmb_1_dynamic_asize;
+	bool ipa_full_flush_wait_rsc_closure_en;
+	u8 ipa_atomic_fetcher_arb_lock_dis;
+	bool gsi_if_out_of_buf_stop_reset_mask_enable;
+	bool genqmb_aooowr;
+	bool qmb_ram_rd_cache_disable;
+	bool ipa_qmb_select_by_address_global_en;
+	bool gsi_multi_axi_masters_dis;
+	bool gsi_snoc_cnoc_loop_protection_disable;
+	bool gen_qmb_0_snoc_cnoc_loop_protection_disable;
+	bool gen_qmb_1_multi_inorder_wr_dis;
+	bool gen_qmb_0_multi_inorder_wr_dis;
+	bool gen_qmb_1_multi_inorder_rd_dis;
+	bool gen_qmb_0_multi_inorder_rd_dis;
+	bool gsi_multi_inorder_wr_dis;
+	bool gsi_multi_inorder_rd_dis;
+	bool ipa_qmb_select_by_address_prod_en;
+	bool ipa_qmb_select_by_address_cons_en;
+	bool ipa_dcmp_fast_clk_en;
+	bool gen_qmb_1_snoc_bypass_dis;
+	bool gen_qmb_0_snoc_bypass_dis;
+	bool gsi_snoc_bypass_dis;
+	bool ram_arb_priority_client_samp_fix_disable;
+	bool enable;
+};
+
+/*
+ * struct ipahal_reg_tx_wrapper- IPA TX Wrapper state information
+ */
+struct ipahal_reg_tx_wrapper {
+	bool tx0_idle;
+	bool tx1_idle;
+	bool ipa_prod_ackmngr_db_empty;
+	bool ipa_prod_ackmngr_state_idle;
+	bool ipa_prod_prod_bresp_empty;
+	bool ipa_prod_prod_bresp_toggle_idle;
+	bool ipa_mbim_pkt_fms_idle;
+	u8 mbim_direct_dma;
+	bool trnseq_force_valid;
+	bool pkt_drop_cnt_idle;
+	u8 nlo_direct_dma;
+	u8 coal_direct_dma;
+	bool coal_slave_idle;
+	bool coal_slave_ctx_idle;
+	u8 coal_slave_open_frame;
+};
+
+/*
+ * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
+ *  each field tells whether it is to be masked or not
+ * @src_id: pipe number for flt, table index for rt
+ * @src_ip_addr: IP source address
+ * @dst_ip_addr: IP destination address
+ * @src_port: L4 source port
+ * @dst_port: L4 destination port
+ * @protocol: IP protocol field
+ * @meta_data: packet meta-data
+ *
+ */
+struct ipahal_reg_hash_tuple {
+	/* src_id: pipe in flt, tbl index in rt */
+	bool src_id;
+	bool src_ip_addr;
+	bool dst_ip_addr;
+	bool src_port;
+	bool dst_port;
+	bool protocol;
+	bool meta_data;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
+ * @flt: Hash tuple info for filtering
+ * @rt: Hash tuple info for routing
+ * @undefinedX: Undefined/unused bit fields of the register
+ */
+struct ipahal_reg_fltrt_hash_tuple {
+	struct ipahal_reg_hash_tuple flt;
+	struct ipahal_reg_hash_tuple rt;
+	u32 undefined1;
+	u32 undefined2;
+};
+
+/*
+ * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
+ * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
+ * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
+ * DBG_CNT_TYPE_GENERAL - General counter
+ * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
+ * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
+ */
+enum ipahal_reg_dbg_cnt_type {
+	DBG_CNT_TYPE_IPV4_FLTR,
+	DBG_CNT_TYPE_IPV4_ROUT,
+	DBG_CNT_TYPE_GENERAL,
+	DBG_CNT_TYPE_IPV6_FLTR,
+	DBG_CNT_TYPE_IPV6_ROUT,
+};
+
+/*
+ * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
+ * @en - Enable debug counter
+ * @type - Type of debug counting
+ * @product - False -> count bytes. True -> count packets
+ * @src_pipe - Specific Pipe to match. If FF, no need to match
+ *	specific pipe
+ * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
+ *	src_pipe. Starting at IPA V3_5,
+ *	no support on Global Rule. This field will be ignored.
+ * @rule_idx - Rule index. Irrelevant for type General
+ */
+struct ipahal_reg_debug_cnt_ctrl {
+	bool en;
+	enum ipahal_reg_dbg_cnt_type type;
+	bool product;
+	u8 src_pipe;
+	bool rule_idx_pipe_rule;
+	u16 rule_idx;
+};
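
A sketch of programming one such counter through the fields API; the values are illustrative (0xFF in src_pipe means no pipe match per the description above):

	/* Sketch: enable debug counter 0 as a general packet counter. */
	struct ipahal_reg_debug_cnt_ctrl ctrl = {
		.en = true,
		.type = DBG_CNT_TYPE_GENERAL,
		.product = true,	/* count packets rather than bytes */
		.src_pipe = 0xFF,	/* no specific pipe match */
	};

	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &ctrl);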
+
+/*
+ * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
+ * @x_min - first group min value
+ * @x_max - first group max value
+ * @y_min - second group min value
+ * @y_max - second group max value
+ */
+struct ipahal_reg_rsrc_grp_cfg {
+	u32 x_min;
+	u32 x_max;
+	u32 y_min;
+	u32 y_max;
+};
+
+/*
+ * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
+ * @client_minmax - Min or Max values. In case of depth 0, all 4 or 5 values
+ *	are used. In case of depth 1, only the first 2 values are used
+ */
+struct ipahal_reg_rx_hps_clients {
+	u32 client_minmax[5];
+};
+
+/*
+ * struct ipahal_reg_rx_hps_weights - weight values for RX HPS clients
+ * @hps_queue_weight_0 - 4 bit Weight for RX_HPS_CMDQ #0 (3:0)
+ * @hps_queue_weight_1 - 4 bit Weight for RX_HPS_CMDQ #1 (7:4)
+ * @hps_queue_weight_2 - 4 bit Weight for RX_HPS_CMDQ #2 (11:8)
+ * @hps_queue_weight_3 - 4 bit Weight for RX_HPS_CMDQ #3 (15:12)
+ */
+struct ipahal_reg_rx_hps_weights {
+	u32 hps_queue_weight_0;
+	u32 hps_queue_weight_1;
+	u32 hps_queue_weight_2;
+	u32 hps_queue_weight_3;
+};
+
+/*
+ * struct ipahal_reg_counter_cfg - granularity of counter registers
+ * @aggr_granularity - Defines the granularity of AGGR timers
+ *	granularity [msec]=(x+1)/(32)
+ */
+struct ipahal_reg_counter_cfg {
+	enum {
+		GRAN_VALUE_125_USEC = 3,
+		GRAN_VALUE_250_USEC = 7,
+		GRAN_VALUE_500_USEC = 15,
+		GRAN_VALUE_MSEC = 31,
+	} aggr_granularity;
+};
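
The enum values are consistent with the formula above: e.g. GRAN_VALUE_125_USEC = 3 gives (3+1)/32 = 0.125 msec, and GRAN_VALUE_MSEC = 31 gives (31+1)/32 = 1 msec.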
+
+
+/*
+ * struct ipahal_reg_valmask - holding values and masking for registers
+ *	HAL application may require only value and mask of it for some
+ *	register fields.
+ * @val - The value
+ * @mask - The mask of the value
+ */
+struct ipahal_reg_valmask {
+	u32 val;
+	u32 mask;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
+ * @v6_rt - Flush IPv6 Routing cache
+ * @v6_flt - Flush IPv6 Filtering cache
+ * @v4_rt - Flush IPv4 Routing cache
+ * @v4_flt - Flush IPv4 Filtering cache
+ */
+struct ipahal_reg_fltrt_hash_flush {
+	bool v6_rt;
+	bool v6_flt;
+	bool v4_rt;
+	bool v4_flt;
+};
+
+/*
+ * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
+ * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
+ *	NDP-header.
+ * @unused: undefined bits of the register
+ */
+struct ipahal_reg_single_ndp_mode {
+	bool single_ndp_en;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qcncm - IPA QCNCM register
+ * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
+ * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
+ *	the NDP header.
+ * @unused: undefined bits of the register
+ */
+struct ipahal_reg_qcncm {
+	bool mode_en;
+	u32 mode_val;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qsb_max_writes - IPA QSB Max Writes register
+ * @qmb_0_max_writes: Max number of outstanding writes for GEN_QMB_0
+ * @qmb_1_max_writes: Max number of outstanding writes for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_writes {
+	u32 qmb_0_max_writes;
+	u32 qmb_1_max_writes;
+};
+
+/*
+ * struct ipahal_reg_qsb_max_reads - IPA QSB Max Reads register
+ * @qmb_0_max_reads: Max number of outstanding reads for GEN_QMB_0
+ * @qmb_1_max_reads: Max number of outstanding reads for GEN_QMB_1
+ * @qmb_0_max_read_beats: Max number of outstanding read beats for GEN_QMB_0
+ * @qmb_1_max_read_beats: Max number of outstanding read beats for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_reads {
+	u32 qmb_0_max_reads;
+	u32 qmb_1_max_reads;
+	u32 qmb_0_max_read_beats;
+	u32 qmb_1_max_read_beats;
+};
+
+/*
+ * struct ipahal_reg_tx_cfg - IPA TX_CFG register
+ * @tx0_prefetch_disable: Disable prefetch on TX0
+ * @tx1_prefetch_disable: Disable prefetch on TX1
+ * @tx0_prefetch_almost_empty_size: Prefetch almost empty size on TX0
+ * @tx1_prefetch_almost_empty_size: Prefetch almost empty size on TX1
+ * @dmaw_scnd_outsd_pred_threshold: threshold for DMAW_SCND_OUTSD_PRED_EN
+ * @dmaw_max_beats_256_dis:
+ * @dmaw_scnd_outsd_pred_en:
+ * @pa_mask_en:
+ * @dual_tx_enable: When 1, TX0 and TX1 are enabled. When 0, only TX0 is enabled
+ * @sspnd_pa_no_start_state: When 1, sspnd_req does not take into account
+ *			     PA FSM state START.
+ *			     When 0, sspnd_req will not be answered
+ *			     in that state.
+ *  Relevant starting IPA4.5
+ */
+struct ipahal_reg_tx_cfg {
+	bool tx0_prefetch_disable;
+	bool tx1_prefetch_disable;
+	u32 tx0_prefetch_almost_empty_size;
+	u32 tx1_prefetch_almost_empty_size;
+	u32 dmaw_scnd_outsd_pred_threshold;
+	u32 dmaw_max_beats_256_dis;
+	u32 dmaw_scnd_outsd_pred_en;
+	u32 pa_mask_en;
+	bool dual_tx_enable;
+	bool sspnd_pa_no_start_state;
+};
+
+/*
+ * struct ipahal_reg_idle_indication_cfg - IPA IDLE_INDICATION_CFG register
+ * @const_non_idle_enable: enable the asserting of the IDLE value and DCD
+ * @enter_idle_debounce_thresh:  configure the debounce threshold
+ */
+struct ipahal_reg_idle_indication_cfg {
+	u16 enter_idle_debounce_thresh;
+	bool const_non_idle_enable;
+};
+
+/*
+ * struct ipa_ep_cfg_ctrl_scnd - IPA_ENDP_INIT_CTRL_SCND_n register
+ * @endp_delay: delay endpoint
+ */
+struct ipahal_ep_cfg_ctrl_scnd {
+	bool endp_delay;
+};
+
+/*
+ * struct ipahal_reg_state_coal_master- IPA_STATE_COAL_MASTER register
+ * @vp_timer_expired: VP bitmap. If set, VP aggregation timer has expired
+ * @lru_vp: least recently used VP index
+ * @init_vp_fsm_state: init VP FSM current state
+ * @check_fir_fsm_state: check fir FSM current state
+ * @hash_calc_fsm_state: hash calculation FSM current state
+ * @find_open_fsm_state: find open VP FSM current state
+ * @main_fsm_state: main coalescing master state FSM current state
+ * @vp_vld: VP bitmap. If set, VP is valid, and coalescing frame is open.
+ */
+struct ipahal_reg_state_coal_master {
+	u32 vp_timer_expired;
+	u32 lru_vp;
+	u32 init_vp_fsm_state;
+	u32 check_fir_fsm_state;
+	u32 hash_calc_fsm_state;
+	u32 find_open_fsm_state;
+	u32 main_fsm_state;
+	u32 vp_vld;
+};
+
+/*
+ * struct ipahal_reg_coal_evict_lru - IPA_COAL_EVICT_LRU register
+ * @coal_vp_lru_thrshld: Connections opened below this value
+ *			 will not get evicted
+ * @coal_eviction_en: Enable eviction
+ */
+struct ipahal_reg_coal_evict_lru {
+	u32 coal_vp_lru_thrshld;
+	bool coal_eviction_en;
+};
+
+/*
+ * struct ipahal_reg_coal_qmap_cfg - IPA_COAL_QMAP_CFG register
+ * @mux_id_byte_sel: The MUX_ID field in the QMAP portion of the COALESCING
+ * header is taken from the injected packet metadata field in PKT_CTX.
+ * Metadata consists of 4 bytes; configuring value 0 in MUX_ID_BYTE_SEL will
+ * take bits 7:0 from the metadata field, value 1 will take bits 15:8 and so on.
+ */
+struct ipahal_reg_coal_qmap_cfg {
+	u32 mux_id_byte_sel;
+};
+
+/*
+ * ipahal_print_all_regs() - Loop over, read and print all the valid registers
+ *  Parameterized registers are also printed for all the valid ranges.
+ *  Prints to dmesg and IPC logs
+ */
+void ipahal_print_all_regs(bool print_to_dmesg);
+
+/*
+ * ipahal_reg_name_str() - returns a string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_read_reg_mn() - Get mn parameterized reg value
+ */
+u32 ipahal_read_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
+/*
+ * ipahal_write_reg_n() - Write to n parameterized reg a raw value
+ */
+static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
+	u32 n, u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, n, val);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+	const void *fields);
+
+/*
+ * ipahal_read_reg() - Get the raw value of a reg
+ */
+static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
+{
+	return ipahal_read_reg_n(reg, 0);
+}
+
+/*
+ * ipahal_write_reg() - Write to reg a raw value
+ */
+static inline void ipahal_write_reg(enum ipahal_reg_name reg,
+	u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, 0, val);
+}
+
+/*
+ * ipahal_read_reg_fields() - Get the parsed value of a reg
+ */
+static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
+{
+	return ipahal_read_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * ipahal_write_reg_fields() - Write to reg a parsed value
+ */
+static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
+	const void *fields)
+{
+	ipahal_write_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * Get the offset of a n parameterized register
+ */
+static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, n);
+}
+
+/*
+ * Get the offset of a register
+ */
+static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, 0);
+}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ *  that cannot be reached by generic functions.
+ * E.g. To disable aggregation, need to write to specific bits of the AGGR
+ *  register. The other bits should be untouched. This operation is very
+ *  specific and cannot be generically defined. For such operations we define
+ *  these specific functions.
+ */
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */

+ 702 - 0
ipa/ipa_v3/ipahal/ipahal_reg_i.h

@@ -0,0 +1,702 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+			(reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+		(((reg) & (mask)) >> (shift))
+
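
For illustration, a sketch of composing and parsing a register image with these macros, using the IPA_ROUTE field definitions that follow:

	/* Sketch: build an IPA_ROUTE register image field by field. */
	u32 reg = 0;

	IPA_SETFIELD_IN_REG(reg, 1,
		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
	IPA_SETFIELD_IN_REG(reg, 1,
		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);

	/* ...and extract a field back out: */
	u32 def_pipe = IPA_GETFIELD_FROM_REG(reg,
		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);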
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK  0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT 0x1c
+
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK_v4_5 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT_v4_5 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK_v4_5 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT_v4_5 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT_v4_5 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK_v4_5 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK_v4_5 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT_v4_5 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK_v4_5 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT_v4_5 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK_v4_5 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT_v4_5 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK_v4_5 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT_v4_5 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v4_5 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v4_5 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK_v4_5 0x30000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT_v4_5 0x1c
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK_v4_5 0xc0000000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT_v4_5 0x1e
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT_v4_5 0x10
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK_v4_5 \
+									0x30000
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT_v4_5 0x12
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK_v4_5 0xC0000
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT_v4_5 0x14
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK_v4_5 0x300000
+
+/* IPA_ENDP_INIT_AGGR_n register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK	0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT	0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK	0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT	0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK_V4_5 0x8000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT_V4_5 27
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK_V4_5 0x4000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT_V4_5 26
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK_V4_5 0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT_V4_5 24
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK_V4_5 0x800000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT_V4_5 23
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK_V4_5 0x7e0000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT_V4_5 17
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK_V4_5 0x1f000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT_V4_5 12
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK_V4_5 0x7e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT_V4_5 5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK_V4_5 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT_V4_5 2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK_V4_5 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT_V4_5 0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0 0x7fffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_2 0x1ffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_2 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_5 0x7fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_5 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_7 0x7fffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_7 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK_V4_5 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT_V4_5 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK_V4_5 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT_V4_5 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK_V4_5 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT_V4_5 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK_V4_5 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT_V4_5 0x4
+#define IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_BMSK_V4_5 0x8
+#define IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_SHFT_V4_5 0x3
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK_V4_5 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT_V4_5 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CONN_TRACK_n register */
+#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_CTRL_SCND_n register */
+#define IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX_V_4_0 22
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2 0
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2 0x1f
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2  0x8
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2 0x1f00
+
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK_V4_5 0x1F
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT_V4_5 0
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK_V4_5 0x100
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT_V4_5 8
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK  0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_5 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_5 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v4_9 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v4_9 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK  0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT  0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK 0x200
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT 0x9
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_CLKON_CFG register */
+#define IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_BMSK_V4_5  0x40000000
+#define IPA_CLKON_CFG_CGC_OPEN_DPL_FIFO_SHFT_V4_5 30
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK  0x20000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT 29
+#define IPA_CLKON_CFG_OPEN_GLOBAL_BMSK 0x10000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_SHFT 28
+#define IPA_CLKON_CFG_OPEN_GSI_IF_BMSK 0x8000000
+#define IPA_CLKON_CFG_OPEN_GSI_IF_SHFT 27
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT 26
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK 0x4000000
+#define IPA_CLKON_CFG_OPEN_QMB_SHFT 25
+#define IPA_CLKON_CFG_OPEN_QMB_BMSK 0x2000000
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT 24
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK 0x1000000
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT 23
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK 0x800000
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT 22
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK 0x400000
+#define IPA_CLKON_CFG_OPEN_FNR_SHFT 21
+#define IPA_CLKON_CFG_OPEN_FNR_BMSK 0x200000
+#define IPA_CLKON_CFG_OPEN_TX_1_SHFT 20
+#define IPA_CLKON_CFG_OPEN_TX_1_BMSK 0x100000
+#define IPA_CLKON_CFG_OPEN_TX_0_SHFT 19
+#define IPA_CLKON_CFG_OPEN_TX_0_BMSK 0x80000
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT 18
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK 0x40000
+#define IPA_CLKON_CFG_OPEN_DCMP_SHFT 17
+#define IPA_CLKON_CFG_OPEN_DCMP_BMSK 0x20000
+#define IPA_CLKON_CFG_OPEN_H_DCPH_SHFT 16
+#define IPA_CLKON_CFG_OPEN_H_DCPH_BMSK 0x10000
+#define IPA_CLKON_CFG_OPEN_D_DCPH_SHFT 15
+#define IPA_CLKON_CFG_OPEN_D_DCPH_BMSK 0x8000
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT 14
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK 0x4000
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT 13
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK 0x2000
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT 12
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK 0x1000
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT 11
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK 0x800
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT 10
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK 0x400
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT 9
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK 0x200
+#define IPA_CLKON_CFG_OPEN_DPS_SHFT 8
+#define IPA_CLKON_CFG_OPEN_DPS_BMSK 0x100
+#define IPA_CLKON_CFG_OPEN_HPS_SHFT 7
+#define IPA_CLKON_CFG_OPEN_HPS_BMSK 0x80
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT 6
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK 0x40
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT 5
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK 0x20
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT 4
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK 0x10
+#define IPA_CLKON_CFG_OPEN_MISC_SHFT 3
+#define IPA_CLKON_CFG_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT 2
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK 0x4
+#define IPA_CLKON_CFG_OPEN_PROC_SHFT 1
+#define IPA_CLKON_CFG_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_OPEN_RX_SHFT 0
+
+/* IPA_QTIME_TIMESTAMP_CFG register */
+#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_SHFT 0
+#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_LSB_BMSK 0x1F
+#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_SHFT 7
+#define IPA_QTIME_TIMESTAMP_CFG_DPL_TIMESTAMP_SEL_BMSK 0x80
+#define IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_SHFT 8
+#define IPA_QTIME_TIMESTAMP_CFG_TAG_TIMESTAMP_LSB_BMSK 0x1F00
+#define IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_SHFT 16
+#define IPA_QTIME_TIMESTAMP_CFG_NAT_TIMESTAMP_LSB_BMSK 0x1F0000
+
+/* IPA_TIMERS_PULSE_GRAN_CFG register */
+#define IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_SHFT(x) (3 * (x))
+#define IPA_TIMERS_PULSE_GRAN_CFG_GRAN_X_BMSK(x) (0x7 << (3 * (x)))
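
For example, GRAN_X_SHFT(1) expands to 3 and GRAN_X_BMSK(1) to 0x38, so pulse generator 1 occupies bits 5:3 of the register.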
+
+/* IPA_TIMERS_XO_CLK_DIV_CFG register */
+#define IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_SHFT 0
+#define IPA_TIMERS_XO_CLK_DIV_CFG_VALUE_BMSK 0x1FF
+#define IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_SHFT 31
+#define IPA_TIMERS_XO_CLK_DIV_CFG_ENABLE_BMSK 0x80000000
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
+/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+						(0xF << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5 0xF0000000
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5 28
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5 0xF000000
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5 24
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5 0xF0000
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5 16
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5 0xF00
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5 8
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5 0xF
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5 0
+
+/* IPA_QSB_MAX_WRITES register */
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4)
+
+/* IPA_QSB_MAX_READS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+
+/* IPA_QSB_MAX_READS_BEATS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0 (0xff0000)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0 (0x10)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0 (0xff000000)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0 (0x18)
+
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+
+#define IPA_TX_CFG_SSPND_PA_NO_START_STATE_BMSK_V4_9 (0x40000)
+#define IPA_TX_CFG_SSPND_PA_NO_START_STATE_SHFT_V4_9 (0x12)
+#define IPA_TX_CFG_DUAL_TX_ENABLE_BMSK_V4_5 (0x20000)
+#define IPA_TX_CFG_DUAL_TX_ENABLE_SHFT_V4_5 (0x11)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0 (0x1e000)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0 (0xd)
+#define IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0 (0x1000)
+#define IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0 (0xc)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0 (0x800)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0 (0xb)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0 (0x400)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0 (0xa)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0 (0x3c0)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0 (0x6)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0 (0x3c)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0 (0x2)
+
+/* IPA_IDLE_INDICATION_CFG register */
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff)
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16)
+
+/* IPA_HPS_FTCH_ARB_QUEUE_WEIGHT register */
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK (0xf)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT (0x0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK (0xf0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT (0x4)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK (0xf00)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT (0x8)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK (0xf000)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT (0xc)
+
+/* IPA_COUNTER_CFG register */
+#define IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK (0x1f0)
+#define IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT (0x4)
+
+/* IPA_COMP_CFG register*/
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK 0x1E0000
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT 17
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK 0x10000
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT 16
+#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK 0x8000
+#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT 15
+#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK 0x4000
+#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 14
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK 0x2000
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 13
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK 0x1000
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT 12
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK 0x800
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT 11
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK 0x400
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT 10
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK 0x200
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT 9
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK 0x100
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT 8
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK 0x80
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT 7
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK 0x40
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT 6
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK 0x20
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT 5
+#define IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_BMSK 0x10
+#define IPA_COMP_CFG_IPA_DCMP_FAST_CLK_EN_SHFT 4
+#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK 0x8
+#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT 3
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK 0x4
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT 2
+#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK 0x2
+#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT 1
+#define IPA_COMP_CFG_ENABLE_BMSK 0x1
+#define IPA_COMP_CFG_ENABLE_SHFT 0
+
+#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5 0x200000
+#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_5 21
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK_v4_5 0x1E0000
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT_v4_5 17
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK_v4_5 0x10000
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT_v4_5 16
+#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK_v4_5 0x8000
+#define IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT_v4_5 15
+#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK_v4_5 0x4000
+#define IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT_v4_5 14
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK_v4_5 \
+									0x2000
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT_v4_5 13
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x1000
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT_v4_5 12
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x800
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT_v4_5 11
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x400
+#define IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT_v4_5 10
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x200
+#define IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT_v4_5 9
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK_v4_5 0x100
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT_v4_5 8
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK_v4_5 0x80
+#define IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT_v4_5 7
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK_v4_5 0x40
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT_v4_5 6
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK_v4_5 0x20
+#define IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT_v4_5 5
+#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK_v4_5 0x8
+#define IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT_v4_5 3
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK_v4_5 0x4
+#define IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT_v4_5 2
+#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK_v4_5 0x2
+#define IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT_v4_5 1
+
+
+/* IPA 4.9 */
+#define IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_BMSK_v4_9 0x80000000
+#define IPA_COMP_CFG_GEN_QMB_0_DYNAMIC_ASIZE_SHFT_v4_9 31
+#define IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_BMSK_v4_9 0x40000000
+#define IPA_COMP_CFG_GEN_QMB_1_DYNAMIC_ASIZE_SHFT_v4_9 30
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK_v4_9 0x1C00000
+#define IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT_v4_9 22
+#define IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK_v4_9 0x200000
+#define IPA_COMP_CFG_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT_v4_9 21
+#define IPA_COMP_CFG_GENQMB_AOOOWR_BMSK_v4_9 0x100000
+#define IPA_COMP_CFG_GENQMB_AOOOWR_SHFT_v4_9 20
+#define IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_BMSK_v4_9 0x80000
+#define IPA_COMP_CFG_QMB_RAM_RD_CACHE_DISABLE_SHFT_v4_9 19
+#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_9 0x20000
+#define IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT_v4_9 17
+#define IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_BMSK_v4_9 0x1
+#define IPA_COMP_CFG_RAM_ARB_PRIORITY_CLIENT_SAMP_FIX_DISABLE_SHFT_v4_9 0
+
+/* IPA_COAL registers */
+#define IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_BMSK 0xF0000000
+#define IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_SHFT 28
+#define IPA_STATE_COAL_MASTER_LRU_VP_BMSK 0xF000000
+#define IPA_STATE_COAL_MASTER_LRU_VP_SHFT 24
+#define IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_BMSK 0xF00000
+#define IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_SHFT 20
+#define IPA_STATE_COAL_MASTER_CHECK_FIR_FSM_STATE_BMSK 0xF0000
+#define IPA_STATE_COAL_MASTER_CHECK_FIR_FSM_STATE_SHFT 16
+#define IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_BMSK 0xF000
+#define IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_SHFT 12
+#define IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_BMSK 0xF00
+#define IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_SHFT 8
+#define IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_BMSK 0xF0
+#define IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_SHFT 4
+#define IPA_STATE_COAL_MASTER_VP_VLD_BMSK 0xF
+#define IPA_STATE_COAL_MASTER_VP_VLD_SHFT 0
+#define IPA_COAL_VP_LRU_THRSHLD_BMSK 0x3E
+#define IPA_COAL_VP_LRU_THRSHLD_SHFT 1
+#define IPA_COAL_EVICTION_EN_BMSK 0x1
+#define IPA_COAL_EVICTION_EN_SHFT 0
+#define IPA_COAL_QMAP_CFG_BMSK 0x1
+#define IPA_COAL_QMAP_CFG_SHFT 0
+
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x1c
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x100000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x14
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
+
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7 28
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7 0x80000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7 19
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7 0x40000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7 18
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7 4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7 3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7 2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7 1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7 0
+
+#endif /* _IPAHAL_REG_I_H_ */
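
A note on the conventions in this header: every register field is described by a *_BMSK/*_SHFT macro pair (a bitmask plus the position of the field's lowest bit), and the _v4_5/_v4_7/_v4_9 suffixes carry layouts that changed between IPA hardware revisions, so several versions of the same field can coexist in one header. A minimal sketch of how such pairs are conventionally consumed; the helper names and the example check below are illustrative, not part of this commit:

#include <linux/types.h>

/* Extract a field: mask out the other bits, then shift down to bit 0. */
static inline u32 ipa_reg_get_field(u32 reg_val, u32 bmsk, u32 shft)
{
	return (reg_val & bmsk) >> shft;
}

/*
 * Read-modify-write a field: clear the masked bits, then OR in the new
 * value shifted into position (re-masked, in case the value overflows
 * the field width).
 */
static inline u32 ipa_reg_set_field(u32 reg_val, u32 bmsk, u32 shft, u32 val)
{
	return (reg_val & ~bmsk) | ((val << shft) & bmsk);
}

/* Hypothetical use: test the GSI SNoC bypass bit of IPA_COMP_CFG on v4.5. */
static inline bool ipa_comp_cfg_gsi_snoc_bypass_dis(u32 comp_cfg)
{
	return ipa_reg_get_field(comp_cfg,
				 IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK_v4_5,
				 IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT_v4_5);
}

Keeping mask and shift as separate plain macros, rather than C bitfields, keeps the layouts portable across compilers and makes it straightforward to hold several hardware revisions' field definitions side by side, as this header does.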

Too many files were changed in this diff, so some files are not shown.