
Merge "msm: ipa3: new low latency data pipes support"

qctecmdr 3 years ago
parent
commit
56f3b1c620

+ 95 - 9
drivers/platform/msm/gsi/gsi.c

@@ -1098,6 +1098,62 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t gsi_msi_isr(int irq, void *ctxt)
+{
+	int ee = gsi_ctx->per.ee;
+	uint64_t rp;
+	struct gsi_chan_xfer_notify notify;
+	unsigned long flags;
+	unsigned long cntr;
+	bool empty;
+	struct gsi_evt_ctx *evt_ctxt;
+	void __iomem *msi_clear_add;
+	void __iomem *msi_add;
+
+	evt_ctxt = (struct gsi_evt_ctx *)(ctxt);
+
+	if (evt_ctxt->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
+		GSIERR("Unexpected irq intf %d\n",
+			evt_ctxt->props.intf);
+		GSI_ASSERT();
+	}
+	/* Clear IRQ by writing irq number to the MSI clear address */
+	msi_clear_add = (void __iomem *)evt_ctxt->props.msi_clear_addr;
+	iowrite32(evt_ctxt->props.intvec, msi_clear_add);
+	/* Writing zero to MSI address as well */
+	msi_add = (void __iomem *)evt_ctxt->props.msi_addr_iore_mapped;
+	iowrite32(0, msi_add);
+	/* Clear IEOB irq if any were generated for the MSI channel */
+	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
+		gsihal_get_ch_reg_idx(evt_ctxt->id),
+		gsihal_get_ch_reg_mask(evt_ctxt->id));
+	spin_lock_irqsave(&evt_ctxt->ring.slock, flags);
+check_again:
+	cntr = 0;
+	empty = true;
+	rp = evt_ctxt->props.gsi_read_event_ring_rp(&evt_ctxt->props,
+			evt_ctxt->id, ee);
+	rp |= evt_ctxt->ring.rp & 0xFFFFFFFF00000000;
+
+	evt_ctxt->ring.rp = rp;
+	while (evt_ctxt->ring.rp_local != rp) {
+		++cntr;
+		if (evt_ctxt->props.exclusive &&
+			atomic_read(&evt_ctxt->chan[0]->poll_mode)) {
+			cntr = 0;
+			break;
+		}
+		gsi_process_evt_re(evt_ctxt, &notify, true);
+		empty = false;
+	}
+	if (!empty)
+		gsi_ring_evt_doorbell(evt_ctxt);
+	if (cntr != 0)
+		goto check_again;
+	spin_unlock_irqrestore(&evt_ctxt->ring.slock, flags);
+	return IRQ_HANDLED;
+}
+
 static uint32_t gsi_get_max_channels(enum gsi_ver ver)
 {
 	uint32_t max_ch = 0;
@@ -1894,7 +1950,7 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
 	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
 	struct gsi_evt_ctx *ctx;
-	int res;
+	int res = 0;
 	int ee;
 	unsigned long flags;
 
@@ -1951,6 +2007,22 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 	ctx->num_of_chan_allocated = 0;
 	ctx->props = *props;
 
+	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
+		ctx->props.intr == GSI_INTR_MSI) {
+		GSIERR("Registering MSI Interrupt for intvec = %d\n",
+			ctx->props.intvec);
+		res = devm_request_irq(gsi_ctx->dev, ctx->props.msi_irq,
+				gsi_msi_isr,
+				IRQF_TRIGGER_HIGH,
+				"gsi",
+				ctx);
+		if (res) {
+			GSIERR("MSI interrupt reg fails res = %d, intvec = %d\n",
+				res, ctx->props.intvec);
+			GSI_ASSERT();
+		}
+	}
+
 	mutex_lock(&gsi_ctx->mlock);
 	ee = gsi_ctx->per.ee;
 	ev_ch_cmd.opcode = op;
@@ -2068,7 +2140,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 	struct gsihal_reg_ee_n_ev_ch_cmd ev_ch_cmd;
 	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
 	struct gsi_evt_ctx *ctx;
-	int res;
+	int res = 0;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2094,6 +2166,12 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
+		ctx->props.intr == GSI_INTR_MSI) {
+		GSIERR("Interrupt dereg for msi_irq = %d\n", ctx->props.msi_irq);
+		devm_free_irq(gsi_ctx->dev, ctx->props.msi_irq, ctx);
+	}
+
 	mutex_lock(&gsi_ctx->mlock);
 	reinit_completion(&ctx->compl);
 	ev_ch_cmd.chid = evt_ring_hdl;
@@ -2389,6 +2467,8 @@ static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props,
 				props->empty_lvl_threshold;
 			if (gsi_ctx->per.ver >= GSI_VER_2_9)
 				ch_k_qos.db_in_bytes = props->db_in_bytes;
+			if (gsi_ctx->per.ver >= GSI_VER_3_0)
+				ch_k_qos.low_latency_en = props->low_latency_en;
 		}
 	}
 	gsihal_write_reg_nk_fields(GSI_EE_n_GSI_CH_k_QOS,
@@ -4164,10 +4244,13 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
+			/* Masking/unmasking of interrupts is not allowed for MSI channels */
+			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
+				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
 				gsihal_get_ch_reg_mask(ctx->evtr->id),
 				0);
+			}
 		}
 		else {
 			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
@@ -4212,10 +4295,13 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 				atomic_set(&coal_ctx->poll_mode, mode);
 		}
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
+			/* Masking/unmasking of interrupts is not allowed for MSI channels */
+			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
+				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
 				gsihal_get_ch_reg_mask(ctx->evtr->id),
 				~0);
+			}
 		}
 		else {
 			__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
@@ -4564,7 +4650,7 @@ int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
 	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
 free_lock:
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
-			gsihal_get_glob_irq_en_gp_int1_mask(), 0);
+		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
 	mutex_unlock(&gsi_ctx->mlock);
 
 	return res;
@@ -4633,7 +4719,7 @@ int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
 	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
 free_lock:
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
-			gsihal_get_glob_irq_en_gp_int1_mask(), 0);
+		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
 	mutex_unlock(&gsi_ctx->mlock);
 
 	return res;
@@ -4724,7 +4810,7 @@ int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
 	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
 free_lock:
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
-			gsihal_get_glob_irq_en_gp_int1_mask(), 0);
+		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
 	mutex_unlock(&gsi_ctx->mlock);
 
 	return res;
@@ -4810,7 +4896,7 @@ int gsi_flow_control_ee(unsigned int chan_idx, unsigned int ee,
 	res = GSI_STATUS_SUCCESS;
 free_lock:
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
-			gsihal_get_glob_irq_en_gp_int1_mask(), 0);
+		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
 	mutex_unlock(&gsi_ctx->mlock);
 
 	return res;
@@ -4878,7 +4964,7 @@ int gsi_query_flow_control_state_ee(unsigned int chan_idx, unsigned int ee,
 
 free_lock:
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
-			gsihal_get_glob_irq_en_gp_int1_mask(), 0);
+		gsihal_get_glob_irq_en_gp_int1_mask(), 0);
 	mutex_unlock(&gsi_ctx->mlock);
 
 	return res;

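The gsi.c hunks above add an MSI-based interrupt path for GPI event rings: gsi_alloc_evt_ring() requests the MSI IRQ when a ring is configured with GSI_INTR_MSI, and gsi_msi_isr() clears the MSI write and drains the ring directly. A minimal caller-side sketch of the new gsi_evt_ring_props fields (variable values are placeholders; the real setup, including ring base and size, lives in ipa_gsi_setup_event_ring() further below):

	struct gsi_evt_ring_props props;

	memset(&props, 0, sizeof(props));
	props.intf = GSI_EVT_CHTYPE_GPI_EV;
	props.intr = GSI_INTR_MSI;
	props.msi_irq = msi_irq;                  /* IRQ from platform resource */
	props.intvec = intvec;                    /* write data for the MSI write */
	props.msi_addr = msi_set_phys;            /* APSS_GICA_SETSPI_NSR, physical */
	props.msi_addr_iore_mapped = msi_set_io;  /* ioremapped SETSPI address */
	props.msi_clear_addr = msi_clear_io;      /* ioremapped CLRSPI address */
	/* re_size, ring_len, ring_base_vaddr/addr, int_modt/modc as usual */
	res = gsi_alloc_evt_ring(&props, dev_hdl, &evt_ring_hdl);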
+ 8 - 1
drivers/platform/msm/gsi/gsi.h

@@ -14,6 +14,7 @@
 #include <linux/msm_gsi.h>
 #include <linux/errno.h>
 #include <linux/ipc_logging.h>
+#include <linux/iommu.h>
 
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
@@ -169,7 +170,9 @@ enum gsi_evt_ring_elem_size {
  * @int_modt:        cycles base interrupt moderation (32KHz clock)
  * @int_modc:        interrupt moderation packet counter
  * @intvec:          write data for MSI write
- * @msi_addr:        MSI address
+ * @msi_irq:         MSI irq number
+ * @msi_addr:        MSI address, APSS_GICA_SETSPI_NSR reg address
+ * @msi_clear_addr:  MSI address, APSS_GICA_CLRSPI_NSR reg address
  * @rp_update_addr:  physical address to which event read pointer should be
  *                   written on every event generation. must be set to 0 when
 *                   no update is desired
@@ -197,7 +200,10 @@ struct gsi_evt_ring_props {
 	uint16_t int_modt;
 	uint8_t int_modc;
 	uint32_t intvec;
+	uint32_t msi_irq;
 	uint64_t msi_addr;
+	uint64_t msi_addr_iore_mapped;
+	uint64_t msi_clear_addr;
 	uint64_t rp_update_addr;
 	void *rp_update_vaddr;
 	bool exclusive;
@@ -485,6 +491,7 @@ struct gsi_chan_props {
 	uint16_t max_re_expected;
 	uint64_t ring_base_addr;
 	uint8_t db_in_bytes;
+	uint8_t low_latency_en;
 	void *ring_base_vaddr;
 	enum gsi_chan_use_db_eng use_db_eng;
 	enum gsi_max_prefetch max_prefetch;

+ 1 - 1
drivers/platform/msm/ipa/Kbuild

@@ -36,7 +36,7 @@ ipam-y += \
 
 ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
 	ipa_v3/ipa_qmi_service.o ipa_v3/rmnet_ctl_ipa.o \
-	ipa_v3/rmnet_ipa_fd_ioctl.o
+	ipa_v3/rmnet_ipa_fd_ioctl.o ipa_v3/rmnet_ll_ipa.o
 
 ipam-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipa_v3/ipa_mpm.o
 

+ 125 - 0
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -6819,6 +6819,9 @@ static inline void ipa3_register_to_fmwk(void)
 	data.ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
 	data.ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
 	data.ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
+	data.ipa_rmnet_ll_xmit = ipa3_rmnet_ll_xmit;
+	data.ipa_register_rmnet_ll_cb = ipa3_register_rmnet_ll_cb;
+	data.ipa_unregister_rmnet_ll_cb = ipa3_unregister_rmnet_ll_cb;
 
 	if (ipa_fmwk_register_ipa(&data)) {
 		IPAERR("couldn't register to IPA framework\n");
@@ -8099,6 +8102,19 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->tx_poll = resource_p->tx_poll;
 	ipa3_ctx->ipa_gpi_event_rp_ddr = resource_p->ipa_gpi_event_rp_ddr;
 	ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
+	ipa3_ctx->rmnet_ll_enable = resource_p->rmnet_ll_enable;
+	ipa3_ctx->gsi_msi_addr = resource_p->gsi_msi_addr;
+	ipa3_ctx->gsi_msi_addr_io_mapped = 0;
+	ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
+	ipa3_ctx->gsi_msi_clear_addr = resource_p->gsi_msi_clear_addr;
+	ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec =
+		resource_p->gsi_rmnet_ctl_evt_ring_intvec;
+	ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq =
+		resource_p->gsi_rmnet_ctl_evt_ring_irq;
+	ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec =
+		resource_p->gsi_rmnet_ll_evt_ring_intvec;
+	ipa3_ctx->gsi_rmnet_ll_evt_ring_irq =
+		resource_p->gsi_rmnet_ll_evt_ring_irq;
 	ipa3_ctx->tx_wrapper_cache_max_size = get_tx_wrapper_cache_size(
 			resource_p->tx_wrapper_cache_max_size);
 	ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
@@ -8596,11 +8612,21 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		}
 	}
 
+	if (ipa3_ctx->rmnet_ll_enable) {
+		result = ipa3_rmnet_ll_init();
+		if (result) {
+			IPAERR(":ipa3_rmnet_ll_init err=%d\n", -result);
+			result = -ENODEV;
+			goto fail_rmnet_ll_init;
+		}
+	}
+
 	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
 	ipa3_ctx->is_modem_up = false;
 
 	return 0;
 
+fail_rmnet_ll_init:
 fail_rmnet_ctl_init:
 	ipa3_wwan_cleanup();
 fail_wwan_init:
@@ -8918,6 +8944,10 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	u32 ipa_holb_monitor_max_cnt_usb;
 	u32 ipa_holb_monitor_max_cnt_11ad;
 	u32 ipa_wan_aggr_pkt_cnt;
+	u32 gsi_msi_addr;
+	u32 gsi_msi_clear_addr;
+	u32 gsi_rmnet_ctl_evt_ring_intvec;
+	u32 gsi_rmnet_ll_evt_ring_intvec;
 
 	/* initialize ipa3_res */
 	ipa_drv_res->ipa_wdi3_2g_holb_timeout = 0;
@@ -9228,6 +9258,70 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->rmnet_ctl_enable
 		? "True" : "False");
 
+	ipa_drv_res->rmnet_ll_enable =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,rmnet-ll-enable");
+	IPADBG(": Enable rmnet ll = %s\n",
+		ipa_drv_res->rmnet_ll_enable
+		? "True" : "False");
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,gsi-msi-addr",
+		&gsi_msi_addr);
+	IPADBG("GSI MSI addr = %lu\n", gsi_msi_addr);
+	ipa_drv_res->gsi_msi_addr = (u64)gsi_msi_addr;
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,gsi-msi-clear-addr",
+		&gsi_msi_clear_addr);
+	IPADBG("GSI MSI clear addr = %lu\n", gsi_msi_clear_addr);
+	ipa_drv_res->gsi_msi_clear_addr = (u64)gsi_msi_clear_addr;
+
+	/* Get IPA MSI IRQ number for rmnet_ctl */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+		"msi-irq-rmnet-ctl");
+	if (!resource) {
+		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = 0;
+		IPAERR(":get resource failed for msi-irq-rmnet-ctl\n");
+	} else {
+		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = resource->start;
+		IPADBG(": msi-irq-rmnet-ctl = %d\n",
+			ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq);
+	}
+
+	/* Get IPA MSI IRQ number for rmnet_ll */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+		"msi-irq-rmnet-ll");
+	if (!resource) {
+		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = 0;
+		IPAERR(":get resource failed for msi-irq-rmnet-ll\n");
+	} else {
+		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = resource->start;
+		IPADBG(": msi-irq-rmnet-ll = %d\n",
+			ipa_drv_res->gsi_rmnet_ll_evt_ring_irq);
+	}
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,gsi-rmnet-ctl-evt-ring-intvec",
+		&gsi_rmnet_ctl_evt_ring_intvec);
+	IPADBG("gsi_rmnet_ctl_evt_ring_intvec = %u\n",
+		gsi_rmnet_ctl_evt_ring_intvec);
+	ipa_drv_res->gsi_rmnet_ctl_evt_ring_intvec =
+		gsi_rmnet_ctl_evt_ring_intvec;
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,gsi-rmnet-ll-evt-ring-intvec",
+		&gsi_rmnet_ll_evt_ring_intvec);
+	IPADBG("gsi_rmnet_ll_evt_ring_intvec = %u\n",
+		gsi_rmnet_ll_evt_ring_intvec);
+	ipa_drv_res->gsi_rmnet_ll_evt_ring_intvec =
+		gsi_rmnet_ll_evt_ring_intvec;
+
+	if (!ipa3_ctx->gsi_msi_addr_io_mapped &&
+		!ipa3_ctx->gsi_msi_clear_addr_io_mapped &&
+		(ipa3_ctx->rmnet_ll_enable || ipa3_ctx->rmnet_ctl_enable))
+			ipa_gsi_map_unmap_gsi_msi_addr(true);
+
 	result = of_property_read_string(pdev->dev.of_node,
 			"qcom,use-gsi-ipa-fw", &ipa_drv_res->gsi_fw_file_name);
 	if (!result)
@@ -10779,6 +10873,37 @@ int ipa3_pci_drv_probe(struct pci_dev *pci_dev, const struct pci_device_id *ent)
 	return result;
 }
 
+void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
+{
+	struct ipa_smmu_cb_ctx *cb;
+	u64 rounddown_addr;
+	int res;
+	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO;
+
+	cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	rounddown_addr = rounddown(ipa3_ctx->gsi_msi_addr, PAGE_SIZE);
+	if (map) {
+		res = ipa3_iommu_map(cb->iommu_domain,
+			rounddown_addr, rounddown_addr, PAGE_SIZE, prot);
+		if (res) {
+			IPAERR("iommu mapping failed for gsi_msi_addr\n");
+			ipa_assert();
+		}
+		ipa3_ctx->gsi_msi_clear_addr_io_mapped =
+			(u64)ioremap(ipa3_ctx->gsi_msi_clear_addr, 4);
+		ipa3_ctx->gsi_msi_addr_io_mapped =
+			(u64)ioremap(ipa3_ctx->gsi_msi_addr, 4);
+	} else {
+		iounmap((int *) ipa3_ctx->gsi_msi_clear_addr_io_mapped);
+		iounmap((int *) ipa3_ctx->gsi_msi_addr_io_mapped);
+		res = iommu_unmap(cb->iommu_domain, rounddown_addr, PAGE_SIZE);
+		ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
+		ipa3_ctx->gsi_msi_addr_io_mapped = 0;
+		if (res)
+			IPAERR("smmu unmap for gsi_msi_addr failed %d\n", res);
+	}
+}
+
 /*
  * The following returns transport register memory location and
  * size...

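ipa_gsi_map_unmap_gsi_msi_addr() above does two jobs: it builds a 1:1 IOMMU mapping of the page holding the MSI set register (so the GSI hardware can write it through the AP SMMU) and ioremaps both the set and clear registers for the CPU writes issued from gsi_msi_isr(). A hedged sketch of the pairing implied by this change (the map call mirrors the guard in get_ipa_dts_configuration()):

	/* probe: map once if either low latency pipe is enabled */
	if (!ipa3_ctx->gsi_msi_addr_io_mapped &&
	    !ipa3_ctx->gsi_msi_clear_addr_io_mapped &&
	    (ipa3_ctx->rmnet_ll_enable || ipa3_ctx->rmnet_ctl_enable))
		ipa_gsi_map_unmap_gsi_msi_addr(true);

	/* teardown: drop the ioremaps, then the SMMU mapping */
	ipa_gsi_map_unmap_gsi_msi_addr(false);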
+ 4 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -1467,6 +1467,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		"wan_rx_empty=%u\n"
 		"wan_rx_empty_coal=%u\n"
 		"wan_repl_rx_empty=%u\n"
+		"rmnet_ll_rx_empty=%u\n"
+		"rmnet_ll_repl_rx_empty=%u\n"
 		"lan_rx_empty=%u\n"
 		"lan_repl_rx_empty=%u\n"
 		"flow_enable=%u\n"
@@ -1485,6 +1487,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		ipa3_ctx->stats.wan_rx_empty,
 		ipa3_ctx->stats.wan_rx_empty_coal,
 		ipa3_ctx->stats.wan_repl_rx_empty,
+		ipa3_ctx->stats.rmnet_ll_rx_empty,
+		ipa3_ctx->stats.rmnet_ll_repl_rx_empty,
 		ipa3_ctx->stats.lan_rx_empty,
 		ipa3_ctx->stats.lan_repl_rx_empty,
 		ipa3_ctx->stats.flow_enable,

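With this change the two new counters appear in the debugfs stats dump, using the format string shown above, e.g. (zero values are illustrative):

	wan_repl_rx_empty=0
	rmnet_ll_rx_empty=0
	rmnet_ll_repl_rx_empty=0
	lan_rx_empty=0

The counters increment from ipa3_replenish_rx_page_recycle() and ipa3_fast_replenish_rx_cache() in ipa_dp.c when the low latency data pipe runs out of replenish buffers.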
+ 173 - 19
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -140,6 +140,7 @@ static unsigned long tag_to_pointer_wa(uint64_t tag);
 static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
 static void ipa3_tasklet_rx_notify(unsigned long data);
 static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget);
 
 struct gsi_chan_xfer_notify g_lan_rx_notify[IPA_LAN_NAPI_MAX_FRAMES];
 
@@ -648,15 +649,13 @@ int ipa3_send(struct ipa3_sys_context *sys,
 
 	return 0;
 
-failure_dma_map:
-	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
-
 failure:
 	ipahal_destroy_imm_cmd(tag_pyld_ret);
 	tx_pkt = tx_pkt_first;
 	for (j = 0; j < i; j++) {
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
+		sys->len--;
 
 		if (!tx_pkt->no_unmap_dma) {
 			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
@@ -674,6 +673,9 @@ failure:
 		tx_pkt = next_pkt;
 	}
 
+failure_dma_map:
+	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
 	spin_unlock_bh(&sys->spinlock);
 	return result;
 }
@@ -954,6 +956,8 @@ void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
 		case IPA_CLIENT_APPS_WAN_PROD:
 		case IPA_CLIENT_APPS_LAN_PROD:
 		case IPA_CLIENT_APPS_WAN_LOW_LAT_CONS:
+		case IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD:
+		case IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS:
 			/* for error handling */
 			break;
 		default:
@@ -1126,6 +1130,11 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
 				SUSPEND_MAX_SLEEP_RX);
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LOW_LAT");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LOW_LAT_DATA");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LOW_LAT_DATA");
 		} else
 			IPAERR("Unexpected event %d\n for client %d\n",
 				event, sys->ep->client);
@@ -1298,7 +1307,8 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 			NAPI_TX_WEIGHT);
 			ep->sys->napi_tx_enable = ipa3_ctx->tx_napi_enable;
 			ep->sys->tx_poll = ipa3_ctx->tx_poll;
-		} else if(sys_in->client == IPA_CLIENT_APPS_WAN_PROD) {
+		} else if(sys_in->client == IPA_CLIENT_APPS_WAN_PROD ||
+			sys_in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) {
 			netif_tx_napi_add((struct net_device *)sys_in->priv,
 			&ep->sys->napi_tx, tx_completion_func,
 			NAPI_TX_WEIGHT);
@@ -1316,6 +1326,12 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		}
 	}
 
+	if (sys_in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
+		netif_napi_add((struct net_device *)sys_in->priv,
+			&ep->sys->napi_rx, ipa3_rmnet_ll_rx_poll, NAPI_WEIGHT);
+		napi_enable(&ep->sys->napi_rx);
+	}
+
 	ep->client = sys_in->client;
 	ep->sys->ext_ioctl_v2 = sys_in->ext_ioctl_v2;
 	ep->sys->int_modt = sys_in->int_modt;
@@ -1327,7 +1343,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	if (ipa3_assign_policy(sys_in, ep->sys)) {
 		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
 		result = -ENOMEM;
-		goto fail_napi;
+		goto fail_napi_rx;
 	}
 
 	ep->valid = 1;
@@ -1455,7 +1471,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	}
 
 	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
-		if (IPA_CLIENT_IS_WAN_CONS(sys_in->client) &&
+		if ((IPA_CLIENT_IS_WAN_CONS(sys_in->client) ||
+			sys_in->client ==
+			IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) &&
 			ipa3_ctx->ipa_wan_skb_page) {
 			ipa3_replenish_rx_page_recycle(ep->sys);
 		} else
@@ -1472,7 +1490,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
 	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
 		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
-			sys_in->client == IPA_CLIENT_APPS_WAN_PROD)
+			(sys_in->client == IPA_CLIENT_APPS_WAN_PROD ||
+				sys_in->client ==
+				IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD))
 			IPADBG("modem cfg emb pipe flt\n");
 		else
 			ipa3_install_dflt_flt_rules(ipa_ep_idx);
@@ -1533,6 +1553,11 @@ fail_page_recycle_repl:
 		ep->sys->page_recycle_repl->capacity = 0;
 		kfree(ep->sys->page_recycle_repl);
 	}
+fail_napi_rx:
+	if (sys_in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
+		napi_disable(&ep->sys->napi_rx);
+		netif_napi_del(&ep->sys->napi_rx);
+	}
 fail_napi:
 	/* Delete NAPI TX object. */
 	if (ipa3_ctx->tx_napi_enable &&
@@ -1616,6 +1641,11 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 			netif_napi_del(&ep->sys->napi_tx);
 	}
 
+	if(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
+		napi_disable(&ep->sys->napi_rx);
+		netif_napi_del(&ep->sys->napi_rx);
+	}
+
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1716,7 +1746,8 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 
 	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
 		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
-			ep->client == IPA_CLIENT_APPS_WAN_PROD)
+			(ep->client == IPA_CLIENT_APPS_WAN_PROD ||
+				ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD))
 			IPADBG("modem cfg emb pipe flt\n");
 		else
 			ipa3_delete_dflt_flt_rules(clnt_hdl);
@@ -2131,6 +2162,9 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
 	IPA_ACTIVE_CLIENTS_INC_EP(client_type);
 	if (ipa_net_initialized && sys->napi_obj) {
 		napi_schedule(sys->napi_obj);
+	} else if (ipa_net_initialized &&
+		sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
+		napi_schedule(&sys->napi_rx);
 	} else if (IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
 		tasklet_schedule(&sys->tasklet);
 	} else
@@ -2202,6 +2236,8 @@ fail_kmem_cache_alloc:
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_repl_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_repl_rx_empty);
 		pr_err_ratelimited("%s sys=%pK repl ring empty\n",
 				__func__, sys);
 		goto begin;
@@ -2307,6 +2343,8 @@ fail_kmem_cache_alloc:
 		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS ||
 			sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_repl_rx_empty);
 		pr_err_ratelimited("%s sys=%pK wq_repl ring empty\n",
 				__func__, sys);
 		goto begin;
@@ -2373,7 +2411,19 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 	/* start replenish only when buffers go lower than the threshold */
 	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
 		return;
-	stats_i = (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) ? 0 : 1;
+	switch (sys->ep->client) {
+		case IPA_CLIENT_APPS_WAN_COAL_CONS:
+			stats_i = 0;
+			break;
+		case IPA_CLIENT_APPS_WAN_CONS:
+			stats_i = 1;
+			break;
+		case IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS:
+			stats_i = 2;
+			break;
+		default:
+			IPAERR_RL("Unexpected client %d\n", sys->ep->client);
+	}
 
 	rx_len_cached = sys->len;
 	curr_wq = atomic_read(&sys->repl->head_idx);
@@ -2452,6 +2502,8 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty_coal);
 		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_rx_empty);
 		else
 			WARN_ON(1);
 	}
@@ -2898,6 +2950,8 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.low_lat_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.rmnet_ll_rx_empty);
 		else
 			WARN_ON_RATELIMIT_IPA(1);
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
@@ -4137,7 +4191,8 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		return 0;
 	}
 
-	if (in->client == IPA_CLIENT_APPS_WAN_PROD) {
+	if (in->client == IPA_CLIENT_APPS_WAN_PROD ||
+		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) {
 		sys->policy = IPA_POLICY_INTR_MODE;
 		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0)
 			sys->use_comm_evt_ring = false;
@@ -4184,7 +4239,8 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
 		    in->client == IPA_CLIENT_APPS_WAN_CONS ||
 		    in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
-		    in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
+		    in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
+		    in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
 			sys->ep->status.status_en = true;
 			sys->policy = IPA_POLICY_INTR_POLL_MODE;
 			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
@@ -4218,13 +4274,15 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 				in->ipa_ep_cfg.aggr.aggr_time_limit =
 					IPA_GENERIC_AGGR_TIME_LIMIT;
 			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
-				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+				in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
 				in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
 				if (!in->ext_ioctl_v2)
 					in->ipa_ep_cfg.aggr.aggr_time_limit =
 						IPA_GENERIC_AGGR_TIME_LIMIT;
-				if (ipa3_ctx->ipa_wan_skb_page
-					&& in->napi_obj) {
+				if ((ipa3_ctx->ipa_wan_skb_page
+					&& in->napi_obj) ||
+					in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) {
 					INIT_WORK(&sys->repl_work,
 							ipa3_wq_page_repl);
 					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
@@ -4852,11 +4910,15 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
 	 * where we would have ref counts.
 	 */
 	if ((ipa_net_initialized && sys->napi_obj) ||
-		IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client))
+		IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client) ||
+		(sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS))
 		clk_off = IPA_ACTIVE_CLIENTS_INC_EP_NO_BLOCK(client_type);
 	if (!clk_off && ipa_net_initialized && sys->napi_obj) {
 		trace_ipa3_napi_schedule(sys->ep->client);
 		napi_schedule(sys->napi_obj);
+	} else if (!clk_off &&
+		(sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)) {
+		napi_schedule(&sys->napi_rx);
 	} else if (!clk_off &&
 		IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
 		tasklet_schedule(&sys->tasklet);
@@ -5017,6 +5079,8 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 		in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
 		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS ||
 		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD ||
+		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS ||
+		in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD ||
 		in->client == IPA_CLIENT_APPS_WAN_PROD)
 		mem_flag = GFP_ATOMIC;
 
@@ -5098,7 +5162,25 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 	evt_rp_dma_addr = 0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
-	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) &&
+		ipa3_ctx->gsi_rmnet_ll_evt_ring_irq) {
+		gsi_evt_ring_props.intr = GSI_INTR_MSI;
+		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
+		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
+		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
+		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec;
+		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ll_evt_ring_irq;
+	} else if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) &&
+		ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq) {
+		gsi_evt_ring_props.intr = GSI_INTR_MSI;
+		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
+		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
+		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
+		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec;
+		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq;
+	} else {
+		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	}
 	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
 	gsi_evt_ring_props.ring_len = ring_size;
 	gsi_evt_ring_props.ring_base_vaddr =
@@ -5131,7 +5213,9 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 		(ep->client == IPA_CLIENT_APPS_WAN_CONS) ||
 		(ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) ||
 		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD) ||
-		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS))) {
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) ||
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) ||
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS))) {
 		gsi_evt_ring_props.int_modt = ep->sys->int_modt;
 		gsi_evt_ring_props.int_modc = ep->sys->int_modc;
 	}
@@ -5207,7 +5291,8 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
 		if(ep->client == IPA_CLIENT_APPS_WAN_PROD ||
-		   ep->client == IPA_CLIENT_APPS_LAN_PROD)
+		   ep->client == IPA_CLIENT_APPS_LAN_PROD ||
+		   ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD)
 			gsi_channel_props.tx_poll = ipa3_ctx->tx_poll;
 		else
 			gsi_channel_props.tx_poll = false;
@@ -5258,6 +5343,10 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 	else
 		gsi_channel_props.low_weight = 1;
 	gsi_channel_props.db_in_bytes = 1;
+	/* Configure Low Latency Mode. */
+	if (ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD ||
+		ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+		gsi_channel_props.low_latency_en = 1;
 	gsi_channel_props.prefetch_mode = gsi_ep_info->prefetch_mode;
 	gsi_channel_props.empty_lvl_threshold = gsi_ep_info->prefetch_threshold;
 	gsi_channel_props.chan_user_data = user_data;
@@ -5274,7 +5363,8 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 
 	/* overwrite the cleanup_cb for page recycling */
 	if (ipa3_ctx->ipa_wan_skb_page &&
-		(IPA_CLIENT_IS_WAN_CONS(ep->client)))
+		(IPA_CLIENT_IS_WAN_CONS(ep->client) ||
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)))
 		gsi_channel_props.cleanup_cb = free_rx_page;
 
 	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
@@ -5774,3 +5864,67 @@ start_poll:
 	IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(sys->ep->client);
 }
 
+static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+	struct ipa3_sys_context *sys = container_of(napi_rx,
+		struct ipa3_sys_context, napi_rx);
+	int remain_aggr_weight;
+	int ret;
+	int cnt = 0;
+	int num = 0;
+	struct ipa_active_client_logging_info log;
+	static struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI_LL");
+
+	remain_aggr_weight = budget / ipa3_ctx->ipa_wan_aggr_pkt_cnt;
+	if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
+		IPAERR("NAPI weight is higher than expected\n");
+		IPAERR("expected %d got %d\n",
+			IPA_WAN_NAPI_MAX_FRAMES, remain_aggr_weight);
+		return -EINVAL;
+	}
+
+start_poll:
+	/*
+	 * it is guaranteed we already have clock here.
+	 * This is mainly for clock scaling.
+	 */
+	ipa_pm_activate(sys->pm_hdl);
+	while (remain_aggr_weight > 0 &&
+		atomic_read(&sys->curr_polling_state)) {
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		ret = ipa_poll_gsi_n_pkt(sys, notify,
+			remain_aggr_weight, &num);
+		if (ret)
+			break;
+		ipa3_rx_napi_chain(sys, notify, num);
+		remain_aggr_weight -= num;
+
+		if (sys->len == 0) {
+			if (remain_aggr_weight == 0)
+				cnt--;
+			break;
+		}
+	}
+	cnt += budget - remain_aggr_weight * ipa3_ctx->ipa_wan_aggr_pkt_cnt;
+	/* call repl_hdlr before napi_reschedule / napi_complete */
+	sys->repl_hdlr(sys);
+	/* When not able to replenish enough descriptors, keep in polling
+	 * mode, wait for napi-poll and replenish again.
+	 */
+	if (cnt < budget && (sys->len > IPA_DEFAULT_SYS_YELLOW_WM)) {
+		napi_complete(napi_rx);
+		ret = ipa3_rx_switch_to_intr_mode(sys);
+		if (ret == -GSI_STATUS_PENDING_IRQ &&
+				napi_reschedule(napi_rx))
+			goto start_poll;
+		IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(sys->ep->client);
+	} else {
+		cnt = budget;
+		IPADBG_LOW("Client = %d not replenished free descripotrs\n",
+				sys->ep->client);
+	}
+	return cnt;
+}

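For a sense of the budget accounting in ipa3_rmnet_ll_rx_poll(): with the defaults in ipa_i.h (NAPI_WEIGHT = 64, IPA_WAN_AGGR_PKT_CNT = 1), remain_aggr_weight starts at 64 / 1 = 64. If the ring yields 10 events before going empty, remain_aggr_weight drops to 54 and

	cnt = 64 - 54 * 1 = 10

so cnt < budget and, provided replenishing left more than IPA_DEFAULT_SYS_YELLOW_WM descriptors, the routine calls napi_complete() and re-arms the event ring interrupt. When replenishment falls short (sys->len at or below the yellow watermark), cnt is forced to the full budget so NAPI keeps polling and repl_hdlr gets another pass.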
+ 7 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_flt.c

@@ -488,6 +488,13 @@ static bool ipa_flt_skip_pipe_config(int pipe)
 		return true;
 	}
 
+	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == pipe
+		&& ipa3_ctx->modem_cfg_emb_pipe_flt)
+		&& ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
 	return false;
 }
 

+ 54 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c

@@ -162,6 +162,13 @@ int ipa_hw_stats_init(void)
 			teth_stats_init->prod_mask[reg_idx] |= mask;
 		}
 
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_1) {
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD,
+				&reg_idx);
+			teth_stats_init->prod_mask[reg_idx] |= mask;
+		}
+
 		if (ipa_hw_stats_get_ep_bit_n_idx(
 			IPA_CLIENT_Q6_WAN_PROD,
 			&reg_idx)) {
@@ -255,6 +262,53 @@ int ipa_hw_stats_init(void)
 				&reg_idx);
 			teth_stats_init->dst_ep_mask[ep_index][reg_idx] |= mask;
 		}
+
+		if (ipa_hw_stats_get_ep_bit_n_idx(
+			IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD,
+			&reg_idx) && (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0)) {
+			ep_index = ipa3_get_ep_mapping(
+					IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD);
+			if (ep_index == -1) {
+				IPAERR("Invalid client.\n");
+				ret = -EINVAL;
+				goto fail_free_stats_ctx;
+			}
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_USB_CONS,
+				&reg_idx);
+			teth_stats_init->dst_ep_mask[ep_index][reg_idx] = mask;
+
+			if (ipa3_ctx->ipa_wdi3_over_gsi) {
+				mask = ipa_hw_stats_get_ep_bit_n_idx(
+					IPA_CLIENT_WLAN2_CONS,
+					&reg_idx);
+				teth_stats_init->dst_ep_mask[ep_index][reg_idx]
+					|= mask;
+			} else {
+				mask = ipa_hw_stats_get_ep_bit_n_idx(
+					IPA_CLIENT_WLAN1_CONS,
+					&reg_idx);
+				teth_stats_init->dst_ep_mask[ep_index][reg_idx]
+					|= mask;
+			}
+
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_WIGIG1_CONS,
+				&reg_idx);
+			teth_stats_init->dst_ep_mask[ep_index][reg_idx] |= mask;
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_WIGIG2_CONS,
+				&reg_idx);
+			teth_stats_init->dst_ep_mask[ep_index][reg_idx] |= mask;
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_WIGIG3_CONS,
+				&reg_idx);
+			teth_stats_init->dst_ep_mask[ep_index][reg_idx] |= mask;
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_WIGIG4_CONS,
+				&reg_idx);
+			teth_stats_init->dst_ep_mask[ep_index][reg_idx] |= mask;
+		}
 	}
 
 	if (ipa_hw_stats_get_ep_bit_n_idx(

+ 50 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -59,7 +59,7 @@
 #define IPA5_PIPE_REG_NUM 2
 #define IPA5_MAX_NUM_PIPES (IPA5_PIPES_NUM)
 #define IPA_SYS_DESC_FIFO_SZ 0x800
-#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x2000
 #define IPA_COMMON_EVENT_RING_SIZE 0x7C00
 #define IPA_LAN_RX_HEADER_LENGTH (2)
 #define IPA_QMAP_HEADER_LENGTH (4)
@@ -107,7 +107,7 @@ enum {
 
 #define NAPI_WEIGHT 64
 
-#define NAPI_TX_WEIGHT 32
+#define NAPI_TX_WEIGHT 64
 
 #define IPA_WAN_AGGR_PKT_CNT 1
 
@@ -1108,6 +1108,7 @@ struct ipa3_page_repl_ctx {
  * @xmit_eot_cnt: count of pending eot for tasklet to process
  * @tasklet: tasklet for eot write_done handle (tx_complete)
  * @napi_tx: napi for eot write done handle (tx_complete) - to replace tasklet
+ * @napi_rx: napi for rx handling on the low latency data pipe (rx_complete)
  * @in_napi_context: an atomic variable used for non-blocking locking,
  * preventing from multiple napi_sched to be called.
  * @int_modt: GSI event ring interrupt moderation timer
@@ -1151,6 +1152,7 @@ struct ipa3_sys_context {
 	bool skip_eot;
 	u32 eob_drop_cnt;
 	struct napi_struct napi_tx;
+	struct napi_struct napi_rx;
 	bool tx_poll;
 	bool napi_tx_enable;
 	atomic_t in_napi_context;
@@ -1506,6 +1508,8 @@ struct ipa3_stats {
 	u32 wan_rx_empty;
 	u32 wan_rx_empty_coal;
 	u32 wan_repl_rx_empty;
+	u32 rmnet_ll_rx_empty;
+	u32 rmnet_ll_repl_rx_empty;
 	u32 lan_rx_empty;
 	u32 lan_repl_rx_empty;
 	u32 low_lat_rx_empty;
@@ -1514,7 +1518,7 @@ struct ipa3_stats {
 	u32 flow_disable;
 	u32 tx_non_linear;
 	u32 rx_page_drop_cnt;
-	struct ipa3_page_recycle_stats page_recycle_stats[2];
+	struct ipa3_page_recycle_stats page_recycle_stats[3];
 	u64 page_recycle_cnt[2][IPA_PAGE_POLL_THRESHOLD_MAX];
 };
 
@@ -2061,10 +2065,18 @@ struct ipa3_eth_error_stats {
 * @coal_cmd_pyld: holds the coalescing close frame command payload
 * @ipa_gpi_event_rp_ddr: use DDR to access event RP for GPI channels
 * @rmnet_ctl_enable: enable pipe support for low latency data
+ * @rmnet_ll_enable: enable pipe support for low latency data
  * @gsi_fw_file_name: GSI IPA fw file name
  * @uc_fw_file_name: uC IPA fw file name
  * @eth_info: ethernet client mapping
  * @max_num_smmu_cb: number of smmu s1 cb supported
+ * @u64 gsi_msi_addr: MSI SPI set address APSS_GICA_SETSPI_NSR
+ * @u64 gsi_msi_clear_addr: MSI SPI clear address APSS_GICA_CLRSPI_NSR
+ * @u64 gsi_msi_ioremapped_addr: ioremapped address for debugging purposes
+ * @u32 gsi_rmnet_ctl_evt_ring_irq: IRQ number for rmnet_ctl pipe
+ * @u32 gsi_rmnet_ll_evt_ring_irq: IRQ number for rmnet_ll pipe
+ * @u32 gsi_rmnet_ctl_evt_ring_intvec: HW IRQ number for rmnet_ctl pipe
+ * @u32 gsi_rmnet_ll_evt_ring_intvec: HW IRQ number for rmnet_ll pipe
  * @non_hash_flt_lcl_sys_switch: number of times non-hash flt table moved
  */
 struct ipa3_context {
@@ -2257,6 +2269,7 @@ struct ipa3_context {
 	bool clients_registered;
 	bool ipa_gpi_event_rp_ddr;
 	bool rmnet_ctl_enable;
+	bool rmnet_ll_enable;
 	char *gsi_fw_file_name;
 	char *uc_fw_file_name;
 	struct ipa3_eth_info
@@ -2281,6 +2294,14 @@ struct ipa3_context {
 	u8 page_poll_threshold;
 	u32 non_hash_flt_lcl_sys_switch;
 	bool wan_common_page_pool;
+	u64 gsi_msi_addr;
+	u64 gsi_msi_clear_addr;
+	u64 gsi_msi_addr_io_mapped;
+	u64 gsi_msi_clear_addr_io_mapped;
+	u32 gsi_rmnet_ctl_evt_ring_intvec;
+	u32 gsi_rmnet_ctl_evt_ring_irq;
+	u32 gsi_rmnet_ll_evt_ring_intvec;
+	u32 gsi_rmnet_ll_evt_ring_irq;
 };
 
 struct ipa3_plat_drv_res {
@@ -2340,6 +2361,7 @@ struct ipa3_plat_drv_res {
 	u32 icc_clk_val[IPA_ICC_LVL_MAX][IPA_ICC_MAX];
 	bool ipa_gpi_event_rp_ddr;
 	bool rmnet_ctl_enable;
+	bool rmnet_ll_enable;
 	bool ipa_use_uc_holb_monitor;
 	u32 ipa_holb_monitor_poll_period;
 	u32 ipa_holb_monitor_max_cnt_wlan;
@@ -2358,6 +2380,12 @@ struct ipa3_plat_drv_res {
 	u16 ulso_ip_id_min;
 	u16 ulso_ip_id_max;
 	bool use_pm_wrapper;
+	u64 gsi_msi_addr;
+	u64 gsi_msi_clear_addr;
+	u32 gsi_rmnet_ctl_evt_ring_intvec;
+	u32 gsi_rmnet_ctl_evt_ring_irq;
+	u32 gsi_rmnet_ll_evt_ring_intvec;
+	u32 gsi_rmnet_ll_evt_ring_irq;
 };
 
 /**
@@ -3281,6 +3309,24 @@ int ipa3_setup_apps_low_lat_prod_pipe(bool rmnet_config,
 int ipa3_setup_apps_low_lat_cons_pipe(bool rmnet_config,
 	struct rmnet_ingress_param *ingress_param);
 int ipa3_teardown_apps_low_lat_pipes(void);
+int ipa3_rmnet_ll_init(void);
+int ipa3_register_rmnet_ll_cb(
+	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ll_rx_notify_cb)(
+	void *user_data3, void *rx_data),
+	void *user_data3);
+int ipa3_unregister_rmnet_ll_cb(void);
+int ipa3_rmnet_ll_xmit(struct sk_buff *skb);
+int ipa3_setup_apps_low_lat_data_prod_pipe(
+	struct rmnet_egress_param *egress_param,
+	struct net_device *dev);
+int ipa3_setup_apps_low_lat_data_cons_pipe(
+	struct rmnet_ingress_param *ingress_param,
+	struct net_device *dev);
+int ipa3_teardown_apps_low_lat_data_pipes(void);
 const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
 int ipa_gsi_ch20_wa(void);
 int ipa3_lan_rx_poll(u32 clnt_hdl, int weight);
@@ -3312,6 +3358,7 @@ irq_handler_t ipa3_get_isr(void);
 void ipa_pc_qmp_enable(void);
 u32 ipa3_get_r_rev_version(void);
 void ipa3_notify_clients_registered(void);
+void ipa_gsi_map_unmap_gsi_msi_addr(bool map);
 #if defined(CONFIG_IPA3_REGDUMP)
 int ipa_reg_save_init(u32 value);
 void ipa_save_registers(void);

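The rmnet_ll entry points declared above follow the existing rmnet_ctl pattern: a client registers ready/stop/rx-notify callbacks and then transmits with ipa3_rmnet_ll_xmit(). A hedged sketch against the declared prototypes (callback bodies and user_data values are placeholders; rx_data carrying an skb is an assumption based on the skb-based xmit path):

	static void ll_ready_cb(void *user_data) { /* pipes up; ok to xmit */ }
	static void ll_stop_cb(void *user_data)  { /* pipes going down; quiesce */ }
	static void ll_rx_cb(void *user_data, void *rx_data)
	{
		struct sk_buff *skb = rx_data; /* assumed: rx payload is an skb */

		netif_rx(skb); /* hand the skb to the stack (illustrative) */
	}

	ret = ipa3_register_rmnet_ll_cb(ll_ready_cb, NULL,
			ll_stop_cb, NULL, ll_rx_cb, NULL);
	if (!ret)
		ret = ipa3_rmnet_ll_xmit(skb);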
+ 44 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -4597,6 +4597,14 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			{ 4, 9, 16, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3	},
 			IPA_TX_INSTANCE_NA },
 
+	[IPA_5_1][IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD] = {
+			true, IPA_v5_0_GROUP_URLLC,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3	},
+			IPA_TX_INSTANCE_NA },
+
 	[IPA_5_1][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v5_0_GROUP_DL,
 			true,
@@ -4621,6 +4629,14 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			{ 15, 2, 28, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 },
 			IPA_TX_INSTANCE_NA },
 
+	[IPA_5_1][IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD] = {
+			true, IPA_v5_0_GROUP_URLLC,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 8, 28, 32, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 3 },
+			IPA_TX_INSTANCE_UL },
+
 	[IPA_5_1][IPA_CLIENT_APPS_LAN_CONS] = {
 			true, IPA_v5_0_GROUP_UL,
 			false,
@@ -4725,6 +4741,14 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			{ 32, 10, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3	},
 			IPA_TX_INSTANCE_DL },
 
+	[IPA_5_1][IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS] = {
+			true, IPA_v5_0_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 33, 6, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3	},
+			IPA_TX_INSTANCE_DL },
+
 	[IPA_5_1][IPA_CLIENT_Q6_LAN_CONS]         = {
 			true, IPA_v5_0_GROUP_DL,
 			false,
@@ -5909,6 +5933,9 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_ETHERNET2_CONS),
 	__stringify(RESERVERD_PROD_118),
 	__stringify(IPA_CLIENT_WLAN2_CONS1),
+	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD),
+	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS),
+	__stringify(IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD),
 };
 EXPORT_SYMBOL(ipa_clients_strings);
 
@@ -10907,6 +10934,11 @@ int ipa3_suspend_apps_pipes(bool suspend)
 	if (res == -EAGAIN)
 		goto undo_qmap_cons;
 
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS,
+		suspend);
+	if (res == -EAGAIN)
+		goto undo_low_lat_data_cons;
+
 	if (suspend) {
 		struct ipahal_reg_tx_wrapper tx;
 		int ep_idx;
@@ -10921,7 +10953,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
 			IPADBG("COAL frame is open 0x%x\n",
 				tx.coal_slave_open_frame);
 			res = -EAGAIN;
-			goto undo_qmap_cons;
+			goto undo_low_lat_data_cons;
 		}
 
 		usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
@@ -10934,7 +10966,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
 				if (res) {
 					IPADBG("suspend irq is pending 0x%x\n",
 						res);
-					goto undo_qmap_cons;
+					goto undo_low_lat_data_cons;
 				}
 			}
 		} else {
@@ -10954,6 +10986,10 @@ do_prod:
 		suspend);
 	if (res == -EAGAIN)
 		goto undo_qmap_prod;
+	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD,
+		suspend);
+	if (res == -EAGAIN)
+		goto undo_low_lat_data_prod;
 	res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, suspend);
 	if (res == -EAGAIN)
 		goto undo_wan_prod;
@@ -10961,11 +10997,17 @@ do_prod:
 
 undo_wan_prod:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, !suspend);
+undo_low_lat_data_prod:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD,
+		!suspend);
 undo_qmap_prod:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD,
 		!suspend);
 undo_lan_prod:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, !suspend);
+undo_low_lat_data_cons:
+	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS,
+		!suspend);
 undo_qmap_cons:
 	_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
 		!suspend);

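For readers of the new IPA_5_1 rows: the brace-enclosed tuple is the usual ipa_gsi_ep_config layout used throughout ipa_utils.c. Annotating the APPS_WAN_LOW_LAT_DATA_PROD entry as a reading aid (field names per that struct; this is interpretation, not new code):

	{ 10,                  /* ipa_ep_num: IPA endpoint 10 */
	  5,                   /* ipa_gsi_chan_num: GSI channel 5 */
	  10,                  /* ipa_if_tlv: TLV FIFO size */
	  16,                  /* ipa_if_aos: AOS FIFO size */
	  IPA_EE_AP,           /* ee: owned by the apps processor */
	  GSI_SMART_PRE_FETCH, /* prefetch_mode */
	  3 },                 /* prefetch_threshold */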
+ 356 - 6
drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c

@@ -59,6 +59,7 @@ enum ipa_ap_ingress_ep_enum {
 	IPA_AP_INGRESS_EP_DEFAULT = 1 << 0,
 	IPA_AP_INGRESS_EP_COALS = 1 << 1,
 	IPA_AP_INGRESS_EP_LOW_LAT = 1 << 2,
+	IPA_AP_INGRESS_EP_LOW_LAT_DATA = 1 << 3,
 };
 
 #define IPA_WWAN_RX_SOFTIRQ_THRESH 16
@@ -163,6 +164,7 @@ struct rmnet_ipa3_context {
 	u32 qmap_hdr_hdl;
 	/* For both IPv4 and IPv6, one rule for ICMP and one for the rest */
 	u32 dflt_wan_rt_hdl[IPA_IP_MAX][WAN_RT_RULES_TOTAL];
+	u32 low_lat_rt_hdl[IPA_IP_MAX][WAN_RT_RULES_TOTAL];
 	struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
 	int num_q6_rules;
 	int old_num_q6_rules;
@@ -530,6 +532,111 @@ free_rule:
 	return ret;
 }
 
+/**
+ * ipa3_setup_low_lat_rt_rules() - Setup low latency data routing rules
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+static int ipa3_setup_low_lat_rt_rules(void)
+{
+	int ret = 0;
+	struct ipa_ioc_add_rt_rule_ext_v2 *rt_rule;
+	struct ipa_rt_rule_add_ext_v2 *rt_rule_entry;
+
+	rt_rule = kzalloc(sizeof(struct ipa_ioc_add_rt_rule_ext_v2),
+		GFP_KERNEL);
+	if (!rt_rule)
+		return -ENOMEM;
+	rt_rule->num_rules = 2;
+	rt_rule->rules = (uint64_t)kzalloc(
+		rt_rule->num_rules * sizeof(struct ipa_rt_rule_add_ext_v2),
+		GFP_KERNEL);
+	if (!(struct ipa_rt_rule_add_ext_v2 *)(rt_rule->rules)) {
+		ret = -ENOMEM;
+		goto free_rule;
+	}
+
+	/* setup a low lat v4 route to point to Apps */
+	rt_rule->commit = 1;
+	rt_rule->rule_add_ext_size = sizeof(struct ipa_rt_rule_add_ext_v2);
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = (struct ipa_rt_rule_add_ext_v2 *)rt_rule->rules;
+	rt_rule_entry[WAN_RT_COMMON].at_rear = 0;
+	rt_rule_entry[WAN_RT_COMMON].rule.dst =
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS;
+	rt_rule_entry[WAN_RT_COMMON].rule.hdr_hdl =
+		rmnet_ipa3_ctx->qmap_hdr_hdl;
+	rt_rule_entry[WAN_RT_COMMON].rule.attrib.attrib_mask =
+		IPA_FLT_META_DATA;
+	/* Low lat routing is based on metadata */
+	rt_rule_entry[WAN_RT_COMMON].rule.attrib.meta_data =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+	rt_rule_entry[WAN_RT_COMMON].rule.attrib.meta_data_mask =
+		0xFF;
+
+	rt_rule_entry[WAN_RT_ICMP].at_rear = 0;
+	rt_rule_entry[WAN_RT_ICMP].rule.dst =
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS;
+	rt_rule_entry[WAN_RT_ICMP].rule.hdr_hdl =
+		rmnet_ipa3_ctx->qmap_hdr_hdl;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.attrib_mask =
+		IPA_FLT_META_DATA;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.meta_data =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.meta_data_mask =
+		0xFF;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.attrib_mask |=
+			IPA_FLT_PROTOCOL;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.u.v4.protocol =
+		(uint8_t)IPPROTO_ICMP;
+
+	if (ipa3_add_rt_rule_ext_v2(rt_rule)) {
+		IPAWANERR("fail to add low lat v4 rule\n");
+		ret = -EPERM;
+		goto free_rule_entry;
+	}
+	IPAWANDBG("low lat v4 rt rule hdl[WAN_RT_COMMON]=%x\n",
+		rt_rule_entry[WAN_RT_COMMON].rt_rule_hdl);
+	rmnet_ipa3_ctx->low_lat_rt_hdl[IPA_IP_v4][WAN_RT_COMMON] =
+		rt_rule_entry[WAN_RT_COMMON].rt_rule_hdl;
+	IPAWANDBG("low lat v4 rt rule hdl[WAN_RT_ICMP]=%x\n",
+		rt_rule_entry[WAN_RT_ICMP].rt_rule_hdl);
+	rmnet_ipa3_ctx->low_lat_rt_hdl[IPA_IP_v4][WAN_RT_ICMP] =
+		rt_rule_entry[WAN_RT_ICMP].rt_rule_hdl;
+
+	/* setup low lat v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.attrib_mask =
+		IPA_FLT_META_DATA | IPA_FLT_NEXT_HDR;
+	rt_rule_entry[WAN_RT_ICMP].rule.attrib.u.v6.next_hdr =
+		(uint8_t)IPPROTO_ICMP;
+	if (ipa3_add_rt_rule_ext_v2(rt_rule)) {
+		IPAWANERR("fail to add low lat v6 rule\n");
+		ret = -EPERM;
+		goto free_rule_entry;
+	}
+	IPAWANDBG("low lat v6 rt rule hdl[WAN_RT_COMMON]=%x\n",
+		rt_rule_entry[WAN_RT_COMMON].rt_rule_hdl);
+	rmnet_ipa3_ctx->low_lat_rt_hdl[IPA_IP_v6][WAN_RT_COMMON] =
+		rt_rule_entry[WAN_RT_COMMON].rt_rule_hdl;
+	IPAWANDBG("low lat v6 rt rule hdl[WAN_RT_ICMP]=%x\n",
+		rt_rule_entry[WAN_RT_ICMP].rt_rule_hdl);
+	rmnet_ipa3_ctx->low_lat_rt_hdl[IPA_IP_v6][WAN_RT_ICMP] =
+		rt_rule_entry[WAN_RT_ICMP].rt_rule_hdl;
+
+free_rule_entry:
+	kfree((void *)(rt_rule->rules));
+free_rule:
+	kfree(rt_rule);
+	return ret;
+}
+
 static void ipa3_del_dflt_wan_rt_tables(void)
 {
 	struct ipa_ioc_del_rt_rule *rt_rule;
@@ -569,6 +676,44 @@ static void ipa3_del_dflt_wan_rt_tables(void)
 	kfree(rt_rule);
 }
 
+static void ipa3_del_low_lat_rt_rule(void)
+{
+	struct ipa_ioc_del_rt_rule *rt_rule;
+	struct ipa_rt_rule_del *rt_rule_entry;
+	int i, len, num_of_rules_per_ip_type;
+	enum ipa_ip_type ip_type;
+
+	num_of_rules_per_ip_type = 2;
+
+	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_del);
+	rt_rule = kzalloc(len, GFP_KERNEL);
+	if (!rt_rule)
+		return;
+
+	rt_rule->commit = 1;
+	rt_rule->num_hdls = 1;
+
+	rt_rule_entry = &rt_rule->hdl[0];
+	rt_rule_entry->status = -1;
+
+	for (ip_type = IPA_IP_v4; ip_type <= IPA_IP_v6; ip_type++) {
+		for (i = WAN_RT_COMMON; i < num_of_rules_per_ip_type; i++) {
+			rt_rule->ip = ip_type;
+			rt_rule_entry->hdl =
+				rmnet_ipa3_ctx->low_lat_rt_hdl[ip_type][i];
+			IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+				rt_rule_entry->hdl, ip_type);
+			if (ipa3_del_rt_rule(rt_rule) ||
+					(rt_rule_entry->status)) {
+				IPAWANERR("Routing rule deletion failed\n");
+			}
+		}
+	}
+
+	kfree(rt_rule);
+}
+
 static void ipa3_copy_qmi_flt_rule_ex(
 	struct ipa_ioc_ext_intf_prop *q6_ul_flt_rule_ptr,
 	void *flt_spec_ptr_void)
@@ -2123,8 +2268,35 @@ static int handle3_ingress_format_v2(struct net_device *dev,
 
 		} else if (ingress_param[i].ingress_ep_type ==
 			RMNET_INGRESS_LOW_LAT_DATA) {
-			IPAWANERR("Ingress Low lat data pipe is not defined\n");
-			continue;
+			/* Searching through the static table, if pipe exists already */
+			for (j = 0; j < RMNET_INGRESS_MAX; j++) {
+				if (ingress_pipe_status[j].ep_type ==
+					RMNET_INGRESS_LOW_LAT_DATA &&
+					ingress_pipe_status[j].status == IPA_PIPE_SETUP_EXISTS) {
+					ingress_param[i].pipe_setup_status
+						= IPA_PIPE_SETUP_EXISTS;
+					IPAWANERR("Receiving ingress low lat data ioctl again");
+					break;
+				}
+			}
+
+			if (ipa3_ctx->rmnet_ll_enable &&
+				(ingress_param[i].pipe_setup_status == IPA_PIPE_SETUP_EXISTS))
+				continue;
+
+			ingress_pipe_status[i].ep_type = RMNET_INGRESS_LOW_LAT_DATA;
+			rc = ipa3_setup_apps_low_lat_data_cons_pipe(
+					&ingress_param[i], dev);
+			if (rc) {
+				IPAWANERR("failed to setup ingress low lat data endpoint\n");
+				ingress_pipe_status[i].status = IPA_PIPE_SETUP_FAILURE;
+				continue;
+			}
+			rmnet_ipa3_ctx->ingress_eps_mask |= IPA_AP_INGRESS_EP_LOW_LAT_DATA;
+			IPAWANDBG("Ingress LOW LAT DATA pipe setup successfully\n");
+			ingress_param[i].pipe_setup_status = IPA_PIPE_SETUP_SUCCESS;
+			/* caching the success status of the pipe */
+			ingress_pipe_status[i].status = IPA_PIPE_SETUP_EXISTS;
 		} else {
 			IPAWANERR("Ingress ep_type not defined\n");
 		}
@@ -2154,6 +2326,12 @@ static int handle3_ingress_format_v2(struct net_device *dev,
 			return -EFAULT;
 		}
 
+		if(ipa3_ctx->rmnet_ll_enable) {
+			rc = ipa3_setup_low_lat_rt_rules();
+			if (rc)
+				IPAWANERR("low lat rt rule add failed = %d\n", rc);
+		}
+
 		rc = ipa3_setup_dflt_wan_rt_tables();
 		if (rc) {
 			ipa3_del_a7_qmap_hdr();
@@ -2389,8 +2567,34 @@ static int handle3_egress_format_v2(struct net_device *dev,
 
 		} else if (egress_param[i].egress_ep_type ==
 			RMNET_EGRESS_LOW_LAT_DATA) {
-			IPAWANERR("Egress Low lat data pipe is not defined yet\n");
-			continue;
+			/* Searching through the static table, if pipe exists already */
+			for (j = 0; j < RMNET_EGRESS_MAX; j++) {
+				if (egress_pipe_status[j].ep_type ==
+					RMNET_EGRESS_LOW_LAT_DATA &&
+					egress_pipe_status[j].status == IPA_PIPE_SETUP_EXISTS) {
+					egress_param[i].pipe_setup_status = IPA_PIPE_SETUP_EXISTS;
+					IPAWANERR("Receiving egress low lat data ioctl again");
+					break;
+				}
+			}
+
+			if (ipa3_ctx->rmnet_ll_enable &&
+				(egress_param[i].pipe_setup_status == IPA_PIPE_SETUP_EXISTS))
+				continue;
+
+			egress_pipe_status[i].ep_type = RMNET_EGRESS_LOW_LAT_DATA;
+
+			rc = ipa3_setup_apps_low_lat_data_prod_pipe(
+					&egress_param[i], dev);
+			if (rc) {
+				IPAWANERR("failed to setup egress low lat data endpoint\n");
+				egress_pipe_status[i].status = IPA_PIPE_SETUP_FAILURE;
+				continue;
+			}
+			IPAWANDBG("Egress LOW LAT DATA pipe setup successfully\n");
+			egress_param[i].pipe_setup_status = IPA_PIPE_SETUP_SUCCESS;
+			/* caching the success status of the pipe */
+			egress_pipe_status[i].status = IPA_PIPE_SETUP_EXISTS;
 		} else {
 			IPAWANERR("Egress ep type not defined");
 		}
@@ -2427,7 +2631,7 @@ static int handle3_egress_format_v2(struct net_device *dev,
 static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	int rc = 0;
-	int mru = 1000, epid = 1, mux_index, len;
+	int mru = 1000, epid = 1, mux_index, len, epid_ll = 5;
 	struct ipa_msg_meta msg_meta;
 	struct ipa_wan_msg *wan_msg = NULL;
 	struct rmnet_ioctl_extended_s ext_ioctl_data;
@@ -2438,7 +2642,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	uint32_t  mux_id;
 	int8_t *v_name;
 	struct mutex *mux_mutex_ptr;
-	int wan_ep;
+	int wan_ep, rmnet_ll_ep;
 	bool tcp_en = false, udp_en = false;
 	bool mtu_v4_set = false, mtu_v6_set = false;
 	enum ipa_ip_type iptype;
@@ -2599,6 +2803,61 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
 			ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
 			break;
+		/*  Get endpoint ID for LL */
+		case RMNET_IOCTL_GET_EPID_LL:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID_LL\n");
+			ext_ioctl_data.u.data = epid_ll;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&ext_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&ext_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EPID_LL return %d\n",
+					ext_ioctl_data.u.data);
+			break;
+		/*  Endpoint pair  */
+		case RMNET_IOCTL_GET_EP_PAIR_LL:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR_LL\n");
+			rmnet_ll_ep =
+				ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+			if (rmnet_ll_ep == IPA_EP_NOT_ALLOCATED) {
+				IPAWANERR("Embedded datapath not supported\n");
+				rc = -EFAULT;
+				break;
+			}
+			ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+				rmnet_ll_ep;
+
+			rmnet_ll_ep =
+				ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD);
+			if (rmnet_ll_ep == IPA_EP_NOT_ALLOCATED) {
+				IPAWANERR("Embedded datapath not supported\n");
+				rc = -EFAULT;
+				break;
+			}
+			ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+				rmnet_ll_ep;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&ext_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&ext_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR_LL c: %d p: %d\n",
+			ext_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+			ext_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+			break;
 		/*  Get driver name  */
 		case RMNET_IOCTL_GET_DRIVER_NAME:
 			if (IPA_NETDEV() != NULL) {
@@ -3472,6 +3731,11 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
 		if (ret < 0)
 			IPAWANERR("Failed to teardown IPA->APPS qmap pipe\n");
 	}
+	if (ipa3_ctx->rmnet_ll_enable) {
+		ret = ipa3_teardown_apps_low_lat_data_pipes();
+		if (ret < 0)
+			IPAWANERR("Failed to teardown IPA->APPS LL pipe\n");
+	}
 	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
 	if (ret < 0)
 		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
@@ -3505,6 +3769,10 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
 	/* No need to remove wwan_ioctl during SSR */
 	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
 		ipa3_wan_ioctl_deinit();
+	if (ipa3_ctx->rmnet_ll_enable &&
+		(ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) !=
+		IPA_EP_NOT_ALLOCATED))
+		ipa3_del_low_lat_rt_rule();
 	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) !=
 		IPA_EP_NOT_ALLOCATED) {
 		ipa3_del_dflt_wan_rt_tables();
@@ -4707,6 +4975,88 @@ static int rmnet_ipa3_query_tethering_stats_hw(
 		(unsigned long) data->ipv4_rx_bytes,
 		(unsigned long) data->ipv6_rx_bytes);
 
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
+		goto skip_nlo_stats;
+
+	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
+	rc = ipa_query_teth_stats(IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD,
+				con_stats, reset);
+	if (rc) {
+		IPAERR("IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD query failed %d\n", rc);
+		kfree(con_stats);
+		return rc;
+	}
+
+	if (ipa3_ctx->ipa_wdi3_over_gsi)
+		wlan_client = IPA_CLIENT_WLAN2_CONS;
+	else
+		wlan_client = IPA_CLIENT_WLAN1_CONS;
+
+	IPAWANDBG("wlan: v4_rx_p-b(%d,%lld) v6_rx_p-b(%d,%lld),client(%d)\n",
+		con_stats->client[wlan_client].num_ipv4_pkts,
+		con_stats->client[wlan_client].num_ipv4_bytes,
+		con_stats->client[wlan_client].num_ipv6_pkts,
+		con_stats->client[wlan_client].num_ipv6_bytes,
+		wlan_client);
+
+	IPAWANDBG("usb: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts,
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes);
+
+	for (i = 0; i < MAX_WIGIG_CLIENTS; i++) {
+		enum ipa_client_type wigig_client =
+			rmnet_ipa3_get_wigig_cons(i);
+
+		if (wigig_client > IPA_CLIENT_WIGIG4_CONS)
+			break;
+
+		IPAWANDBG("wigig%d: v4_rx_p(%d) b(%lld) v6_rx_p(%d) b(%lld)\n",
+			i + 1,
+			con_stats->client[wigig_client].num_ipv4_pkts,
+			con_stats->client[wigig_client].num_ipv4_bytes,
+			con_stats->client[wigig_client].num_ipv6_pkts,
+			con_stats->client[wigig_client].num_ipv6_bytes);
+	}
+
+	/* update the DL stats */
+	data->ipv4_rx_packets +=
+		con_stats->client[wlan_client].num_ipv4_pkts +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_pkts;
+	data->ipv6_rx_packets +=
+		con_stats->client[wlan_client].num_ipv6_pkts +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_pkts;
+	data->ipv4_rx_bytes +=
+		con_stats->client[wlan_client].num_ipv4_bytes +
+			con_stats->client[IPA_CLIENT_USB_CONS].num_ipv4_bytes;
+	data->ipv6_rx_bytes +=
+		con_stats->client[wlan_client].num_ipv6_bytes +
+		con_stats->client[IPA_CLIENT_USB_CONS].num_ipv6_bytes;
+
+	for (i = 0; i < MAX_WIGIG_CLIENTS; i++) {
+		enum ipa_client_type wigig_client =
+			rmnet_ipa3_get_wigig_cons(i);
+
+		if (wigig_client > IPA_CLIENT_WIGIG4_CONS)
+			break;
+
+		data->ipv4_rx_packets +=
+			con_stats->client[wigig_client].num_ipv4_pkts;
+		data->ipv6_rx_packets +=
+			con_stats->client[wigig_client].num_ipv6_pkts;
+		data->ipv4_rx_bytes +=
+			con_stats->client[wigig_client].num_ipv4_bytes;
+		data->ipv6_rx_bytes +=
+			con_stats->client[wigig_client].num_ipv6_bytes;
+	}
+
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long) data->ipv4_rx_packets,
+		(unsigned long) data->ipv6_rx_packets,
+		(unsigned long) data->ipv4_rx_bytes,
+		(unsigned long) data->ipv6_rx_bytes);
+
 skip_nlo_stats:
 	/* query USB UL stats */
 	memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));

+ 869 - 0
drivers/platform/msm/ipa/ipa_v3/rmnet_ll_ipa.c

@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include <uapi/linux/msm_rmnet.h>
+#include "ipa_i.h"
+
+enum ipa_rmnet_ll_state {
+	IPA_RMNET_LL_NOT_REG,
+	IPA_RMNET_LL_REGD, /* rmnet_ll register */
+	IPA_RMNET_LL_PIPE_READY, /* sys pipe setup */
+	IPA_RMNET_LL_START, /* rmnet_ll register + pipe setup */
+};
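The four states above are driven from two sides: callback registration (ipa3_register_rmnet_ll_cb() / ipa3_unregister_rmnet_ll_cb()) and pipe setup/teardown. Read out of the code below, the transitions form a small symmetric state machine:

	NOT_REG   --register-->    REGD        --pipe setup-->  START
	NOT_REG   --pipe setup-->  PIPE_READY  --register-->    START
	START     --unregister-->  PIPE_READY  --teardown-->    NOT_REG
	START     --teardown-->    REGD        --unregister-->  NOT_REG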
+
+#define IPA_RMNET_LL_PIPE_NOT_READY (0)
+#define IPA_RMNET_LL_PIPE_TX_READY (1 << 0)
+#define IPA_RMNET_LL_PIPE_RX_READY (1 << 1)
+#define IPA_RMNET_LL_PIPE_READY_ALL (IPA_RMNET_LL_PIPE_TX_READY | \
+	IPA_RMNET_LL_PIPE_RX_READY) /* TX Ready + RX ready */
+
+
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
+#define RMNET_LL_QUEUE_MAX ((2 * IPA_WWAN_CONS_DESC_FIFO_SZ) - 1)
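With IPA_WWAN_CONS_DESC_FIFO_SZ at 256, RMNET_LL_QUEUE_MAX works out to (2 * 256) - 1 = 511, i.e. the TX path refuses new packets once 511 are outstanding (see ipa3_rmnet_ll_xmit() below).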
+
+struct ipa3_rmnet_ll_cb_info {
+	ipa_rmnet_ll_ready_cb ready_cb;
+	ipa_rmnet_ll_stop_cb stop_cb;
+	ipa_rmnet_ll_rx_notify_cb rx_notify_cb;
+	void *ready_cb_user_data;
+	void *stop_cb_user_data;
+	void *rx_notify_cb_user_data;
+};
+
+struct ipa3_rmnet_ll_stats {
+	atomic_t outstanding_pkts;
+	u32 tx_pkt_sent;
+	u32 rx_pkt_rcvd;
+	u64 tx_byte_sent;
+	u64 rx_byte_rcvd;
+	u32 tx_pkt_dropped;
+	u32 rx_pkt_dropped;
+	u64 tx_byte_dropped;
+	u64 rx_byte_dropped;
+};
+
+struct rmnet_ll_ipa3_debugfs_file {
+	const char *name;
+	umode_t mode;
+	void *data;
+	const struct file_operations fops;
+};
+
+struct rmnet_ll_ipa3_debugfs {
+	struct dentry *dent;
+};
+
+struct rmnet_ll_ipa3_context {
+	struct ipa3_rmnet_ll_stats stats;
+	enum ipa_rmnet_ll_state state;
+	u8 pipe_state;
+	struct ipa_sys_connect_params apps_to_ipa_low_lat_data_ep_cfg;
+	struct ipa_sys_connect_params ipa_to_apps_low_lat_data_ep_cfg;
+	u32 apps_to_ipa3_low_lat_data_hdl;
+	u32 ipa3_to_apps_low_lat_data_hdl;
+	spinlock_t tx_lock;
+	struct ipa3_rmnet_ll_cb_info cb_info;
+	struct sk_buff_head tx_queue;
+	u32 rmnet_ll_pm_hdl;
+	struct rmnet_ll_ipa3_debugfs dbgfs;
+	struct mutex lock;
+	struct workqueue_struct *wq;
+};
+
+static struct rmnet_ll_ipa3_context *rmnet_ll_ipa3_ctx;
+
+static void rmnet_ll_wakeup_ipa(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rmnet_ll_wakeup_work,
+	rmnet_ll_wakeup_ipa);
+static void apps_rmnet_ll_tx_complete_notify(void *priv,
+	enum ipa_dp_evt_type evt, unsigned long data);
+static void apps_rmnet_ll_receive_notify(void *priv,
+	enum ipa_dp_evt_type evt, unsigned long data);
+static int ipa3_rmnet_ll_register_pm_client(void);
+static void ipa3_rmnet_ll_deregister_pm_client(void);
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MAX_MSG_LEN 4096
+static char dbg_buff[IPA_MAX_MSG_LEN + 1];
+
+static ssize_t rmnet_ll_ipa3_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"Queue Len=%u\n"
+		"outstanding_pkts=%u\n"
+		"tx_pkt_sent=%u\n"
+		"rx_pkt_rcvd=%u\n"
+		"tx_byte_sent=%llu\n"
+		"rx_byte_rcvd=%llu\n"
+		"tx_pkt_dropped=%u\n"
+		"rx_pkt_dropped=%u\n"
+		"tx_byte_dropped=%llu\n"
+		"rx_byte_dropped=%llu\n",
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue),
+		atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts),
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_sent,
+		rmnet_ll_ipa3_ctx->stats.rx_pkt_rcvd,
+		rmnet_ll_ipa3_ctx->stats.tx_byte_sent,
+		rmnet_ll_ipa3_ctx->stats.rx_byte_rcvd,
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped,
+		rmnet_ll_ipa3_ctx->stats.rx_pkt_dropped,
+		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped,
+		rmnet_ll_ipa3_ctx->stats.rx_byte_dropped);
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
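Assuming debugfs is mounted at the conventional /sys/kernel/debug, these counters end up readable at /sys/kernel/debug/rmnet_ll_ipa/stats; the directory and file are created by rmnet_ll_ipa3_debugfs_init() below.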
+
+#define READ_ONLY_MODE  0444
+static const struct rmnet_ll_ipa3_debugfs_file debugfs_files[] = {
+	{
+		"stats", READ_ONLY_MODE, NULL, {
+			.read = rmnet_ll_ipa3_read_stats
+		}
+	},
+};
+
+static void rmnet_ll_ipa3_debugfs_remove(void)
+{
+	if (IS_ERR(rmnet_ll_ipa3_ctx->dbgfs.dent))
+		return;
+
+	debugfs_remove_recursive(rmnet_ll_ipa3_ctx->dbgfs.dent);
+	memset(&rmnet_ll_ipa3_ctx->dbgfs, 0,
+		sizeof(struct rmnet_ll_ipa3_debugfs));
+}
+
+static void rmnet_ll_ipa3_debugfs_init(void)
+{
+	struct rmnet_ll_ipa3_debugfs *dbgfs = &rmnet_ll_ipa3_ctx->dbgfs;
+	struct dentry *file;
+	const size_t debugfs_files_num = ARRAY_SIZE(debugfs_files);
+	size_t i;
+
+	dbgfs->dent = debugfs_create_dir("rmnet_ll_ipa", NULL);
+	if (IS_ERR(dbgfs->dent)) {
+		pr_err("fail to create folder in debug_fs\n");
+		return;
+	}
+
+	for (i = 0; i < debugfs_files_num; ++i) {
+		const struct rmnet_ll_ipa3_debugfs_file *curr = &debugfs_files[i];
+
+		file = debugfs_create_file(curr->name, curr->mode, dbgfs->dent,
+			curr->data, &curr->fops);
+		if (IS_ERR_OR_NULL(file)) {
+			IPAERR("fail to create file for debug_fs %s\n",
+				curr->name);
+			goto fail;
+		}
+	}
+
+	return;
+
+fail:
+	rmnet_ll_ipa3_debugfs_remove();
+}
+#else /* CONFIG_DEBUG_FS */
+static void rmnet_ll_ipa3_debugfs_init(void){}
+static void rmnet_ll_ipa3_debugfs_remove(void){}
+#endif /* CONFIG_DEBUG_FS */
+
+int ipa3_rmnet_ll_init(void)
+{
+	char buff[IPA_RESOURCE_NAME_MAX];
+
+	if (!ipa3_ctx) {
+		IPAERR("ipa3_ctx was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == -1 ||
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) == -1) {
+		IPAERR("invalid low lat data endpoints\n");
+		return -EINVAL;
+	}
+
+	rmnet_ll_ipa3_ctx = kzalloc(sizeof(*rmnet_ll_ipa3_ctx),
+			GFP_KERNEL);
+
+	if (!rmnet_ll_ipa3_ctx)
+		return -ENOMEM;
+
+	snprintf(buff, IPA_RESOURCE_NAME_MAX, "rmnet_llwq");
+	rmnet_ll_ipa3_ctx->wq = alloc_workqueue(buff,
+		WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+	if (!rmnet_ll_ipa3_ctx->wq) {
+		kfree(rmnet_ll_ipa3_ctx);
+		return -ENOMEM;
+	}
+	memset(&rmnet_ll_ipa3_ctx->apps_to_ipa_low_lat_data_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+	memset(&rmnet_ll_ipa3_ctx->ipa_to_apps_low_lat_data_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+	skb_queue_head_init(&rmnet_ll_ipa3_ctx->tx_queue);
+	rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_NOT_REG;
+	mutex_init(&rmnet_ll_ipa3_ctx->lock);
+	spin_lock_init(&rmnet_ll_ipa3_ctx->tx_lock);
+	rmnet_ll_ipa3_ctx->pipe_state = IPA_RMNET_LL_PIPE_NOT_READY;
+	rmnet_ll_ipa3_debugfs_init();
+	return 0;
+}
+
+int ipa3_register_rmnet_ll_cb(
+	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ll_rx_notify_cb)(
+	void *user_data3, void *rx_data),
+	void *user_data3)
+{
+	/* check whether ipa3_ctx has been initialized */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver hasn't been initialized\n");
+		return -EAGAIN;
+	}
+
+	if (!ipa3_ctx->rmnet_ll_enable) {
+		IPAERR("low lat data pipes are not supported\n");
+		return -ENXIO;
+	}
+
+	if (!rmnet_ll_ipa3_ctx) {
+		IPADBG("rmnet_ll_ctx hasn't been initialized\n");
+		return -EAGAIN;
+	}
+
+	mutex_lock(&rmnet_ll_ipa3_ctx->lock);
+	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_NOT_REG &&
+		rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_PIPE_READY) {
+		IPADBG("rmnet_ll already registered\n");
+		mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
+		return -EEXIST;
+	}
+	rmnet_ll_ipa3_ctx->cb_info.ready_cb = ipa_rmnet_ll_ready_cb;
+	rmnet_ll_ipa3_ctx->cb_info.ready_cb_user_data = user_data1;
+	rmnet_ll_ipa3_ctx->cb_info.stop_cb = ipa_rmnet_ll_stop_cb;
+	rmnet_ll_ipa3_ctx->cb_info.stop_cb_user_data = user_data2;
+	rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb = ipa_rmnet_ll_rx_notify_cb;
+	rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb_user_data = user_data3;
+	if (rmnet_ll_ipa3_ctx->state == IPA_RMNET_LL_NOT_REG) {
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_REGD;
+	} else {
+		(*ipa_rmnet_ll_ready_cb)(user_data1);
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_START;
+	}
+	ipa3_rmnet_ll_register_pm_client();
+	mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
+	IPADBG("rmnet_ll registered successfully\n");
+	return 0;
+}
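As a usage illustration only (the my_ll_* names and the caller below are hypothetical, not part of this patch), a client such as the rmnet_ll core would be expected to register roughly as follows, retrying on -EAGAIN until the IPA driver is ready:

/* Hypothetical client-side sketch; the my_ll_* names are illustrative
 * and assume the usual kernel/IPA headers (linux/skbuff.h, linux/ipa.h).
 */
static void my_ll_ready(void *user_data)
{
	/* Pipes are up: it is now safe to call ipa3_rmnet_ll_xmit(). */
}

static void my_ll_stop(void *user_data)
{
	/* Pipes are going down: stop submitting packets. */
}

static void my_ll_rx(void *user_data, void *rx_data)
{
	/* rx_data is the received sk_buff, as passed on by
	 * apps_rmnet_ll_receive_notify() below.
	 */
	struct sk_buff *skb = rx_data;

	netif_rx(skb);
}

static int my_ll_attach(void *priv)
{
	int ret;

	ret = ipa3_register_rmnet_ll_cb(my_ll_ready, priv,
			my_ll_stop, priv, my_ll_rx, priv);

	/* -EAGAIN means the IPA or rmnet_ll context is not ready yet;
	 * the caller is expected to retry later.
	 */
	return ret;
}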
+
+int ipa3_unregister_rmnet_ll_cb(void)
+{
+	/* check whether ipa3_ctx has been initialized */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver hasn't been initialized\n");
+		return -EAGAIN;
+	}
+
+	if (!ipa3_ctx->rmnet_ll_enable) {
+		IPAERR("low lat data pipe is disabled\n");
+		return -ENXIO;
+	}
+
+	if (!rmnet_ll_ipa3_ctx) {
+		IPADBG("rmnet_ll_ctx hasn't been initialized\n");
+		return -EAGAIN;
+	}
+
+	mutex_lock(&rmnet_ll_ipa3_ctx->lock);
+	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_REGD &&
+		rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_START) {
+		IPADBG("rmnet_ll already unregistered\n");
+		mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
+		return 0;
+	}
+	rmnet_ll_ipa3_ctx->cb_info.ready_cb = NULL;
+	rmnet_ll_ipa3_ctx->cb_info.ready_cb_user_data = NULL;
+	rmnet_ll_ipa3_ctx->cb_info.stop_cb = NULL;
+	rmnet_ll_ipa3_ctx->cb_info.stop_cb_user_data = NULL;
+	rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb = NULL;
+	rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb_user_data = NULL;
+	if (rmnet_ll_ipa3_ctx->state == IPA_RMNET_LL_REGD)
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_NOT_REG;
+	else
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_PIPE_READY;
+
+	ipa3_rmnet_ll_deregister_pm_client();
+	mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
+
+	IPADBG("rmnet_ll unregistered successfully\n");
+	return 0;
+}
+
+int ipa3_setup_apps_low_lat_data_cons_pipe(
+	struct rmnet_ingress_param *ingress_param,
+	struct net_device *dev)
+{
+	struct ipa_sys_connect_params *ipa_low_lat_data_ep_cfg;
+	int ret = 0;
+	int ep_idx;
+
+	if (!ipa3_ctx->rmnet_ll_enable) {
+		IPAERR("low lat data pipe is disabled\n");
+		return 0;
+	}
+	ep_idx = ipa_get_ep_mapping(
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPADBG("Low lat datapath not supported\n");
+		return -ENXIO;
+	}
+	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_NOT_REG &&
+		rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_REGD) {
+		IPADBG("rmnet_ll in bad state %d\n",
+			rmnet_ll_ipa3_ctx->state);
+		return -ENXIO;
+	}
+	ipa_low_lat_data_ep_cfg =
+		&rmnet_ll_ipa3_ctx->ipa_to_apps_low_lat_data_ep_cfg;
+	/*
+	 * Removing enable aggr from assign_policy
+	 * and placing it here for future enablement
+	 */
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	if (ingress_param) {
+		/* Open for future cs offload disablement on low lat pipe */
+		if (ingress_param->cs_offload_en) {
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+				IPA_ENABLE_CS_DL_QMAP;
+		} else {
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+				IPA_DISABLE_CS_OFFLOAD;
+		}
+		ipa_low_lat_data_ep_cfg->ext_ioctl_v2 = true;
+		ipa_low_lat_data_ep_cfg->int_modt = ingress_param->int_modt;
+		ipa_low_lat_data_ep_cfg->int_modc = ingress_param->int_modc;
+		ipa_low_lat_data_ep_cfg->buff_size = ingress_param->buff_size;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+			ingress_param->agg_byte_limit;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
+			ingress_param->agg_pkt_limit;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_time_limit =
+			ingress_param->agg_time_limit;
+	} else {
+		ipa_low_lat_data_ep_cfg->ext_ioctl_v2 = false;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+			IPA_ENABLE_CS_DL_QMAP;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+			0;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
+			0;
+	}
+
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid
+		= 1;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata
+		= 1;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid
+		= 1;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size
+		= 2;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid
+		= true;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad
+		= 0;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding
+		= true;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset
+		= 0;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian
+		= 0;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask
+		= 0xFF000000;
+	ipa_low_lat_data_ep_cfg->client = IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS;
+	ipa_low_lat_data_ep_cfg->notify = apps_rmnet_ll_receive_notify;
+	ipa_low_lat_data_ep_cfg->priv = dev;
+	ipa_low_lat_data_ep_cfg->desc_fifo_sz =
+		IPA_WWAN_CONS_DESC_FIFO_SZ * IPA_FIFO_ELEMENT_SIZE;
+	ret = ipa_setup_sys_pipe(
+		&rmnet_ll_ipa3_ctx->ipa_to_apps_low_lat_data_ep_cfg,
+		&rmnet_ll_ipa3_ctx->ipa3_to_apps_low_lat_data_hdl);
+	if (ret) {
+		IPAERR("Low lat data pipe setup failed\n");
+		return ret;
+	}
+	rmnet_ll_ipa3_ctx->pipe_state |= IPA_RMNET_LL_PIPE_RX_READY;
+	if (rmnet_ll_ipa3_ctx->cb_info.ready_cb) {
+		(*(rmnet_ll_ipa3_ctx->cb_info.ready_cb))
+			(rmnet_ll_ipa3_ctx->cb_info.ready_cb_user_data);
+	}
+	/*
+	 * If there is no ready_cb yet, rmnet_ll has not registered
+	 * with IPA. In that case move the state to pipe-ready and
+	 * wait for the register event to move it to the start state;
+	 * the ready_cb will then be called from the register call
+	 * itself.
+	 */
+	mutex_lock(&rmnet_ll_ipa3_ctx->lock);
+	if (rmnet_ll_ipa3_ctx->state == IPA_RMNET_LL_NOT_REG)
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_PIPE_READY;
+	else
+		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_START;
+	mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
+
+	return 0;
+}
+
+int ipa3_setup_apps_low_lat_data_prod_pipe(
+	struct rmnet_egress_param *egress_param,
+	struct net_device *dev)
+{
+	struct ipa_sys_connect_params *ipa_low_lat_data_ep_cfg;
+	int ret = 0;
+	int ep_idx;
+
+	if (!ipa3_ctx->rmnet_ll_enable) {
+		IPAERR("Low lat pipe is disabled\n");
+		return 0;
+	}
+	ep_idx = ipa_get_ep_mapping(
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("low lat data pipe not supported\n");
+		return -EFAULT;
+	}
+	ipa_low_lat_data_ep_cfg =
+		&rmnet_ll_ipa3_ctx->apps_to_ipa_low_lat_data_ep_cfg;
+	if (egress_param) {
+		/* Open for future cs offload disablement on low lat pipe */
+		IPADBG("Configuring low lat data prod with rmnet config\n");
+		ipa_low_lat_data_ep_cfg->ext_ioctl_v2 = true;
+		ipa_low_lat_data_ep_cfg->int_modt = egress_param->int_modt;
+		ipa_low_lat_data_ep_cfg->int_modc = egress_param->int_modc;
+		if (egress_param->cs_offload_en) {
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+				IPA_ENABLE_CS_OFFLOAD_UL;
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+				= 1;
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid
+				= 1;
+			/* modem wants the offset at 0 */
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+		} else {
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+				IPA_DISABLE_CS_OFFLOAD;
+		}
+
+		/* Open for future deaggr enablement on low lat pipe */
+		if (egress_param->aggr_en) {
+			IPADBG("Enabling deaggr on low_lat_prod\n");
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_en =
+				IPA_ENABLE_DEAGGR;
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_QCMAP;
+			ipa_low_lat_data_ep_cfg->
+				ipa_ep_cfg.deaggr.packet_offset_valid = false;
+		} else {
+			IPADBG("Not enabling deaggr on low_lat_prod\n");
+			ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_en =
+				IPA_BYPASS_AGGR;
+		}
+	} else {
+		IPADBG("Configuring low lat data prod without rmnet config\n");
+		ipa_low_lat_data_ep_cfg->ext_ioctl_v2 = false;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+			IPA_ENABLE_CS_OFFLOAD_UL;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.aggr.aggr_en =
+			IPA_BYPASS_AGGR;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+			= 1;
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid
+			= 1;
+		/* modem wants the offset at 0 */
+		ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+	}
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask
+			= 0;
+	/* note: this overrides the metadata offset configured above */
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0x00010000;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.deaggr.syspipe_err_detection = true;
+	ipa_low_lat_data_ep_cfg->ipa_ep_cfg.mode.dst =
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD;
+	ipa_low_lat_data_ep_cfg->client =
+		IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD;
+	ipa_low_lat_data_ep_cfg->notify =
+		apps_rmnet_ll_tx_complete_notify;
+	ipa_low_lat_data_ep_cfg->priv = dev;
+	ipa_low_lat_data_ep_cfg->desc_fifo_sz =
+		IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+
+	ret = ipa_setup_sys_pipe(ipa_low_lat_data_ep_cfg,
+		&rmnet_ll_ipa3_ctx->apps_to_ipa3_low_lat_data_hdl);
+	if (ret) {
+		IPAERR("failed to config apps low lat data prod pipe\n");
+		return ret;
+	}
+	rmnet_ll_ipa3_ctx->pipe_state |= IPA_RMNET_LL_PIPE_TX_READY;
+	return 0;
+}
+
+int ipa3_teardown_apps_low_lat_data_pipes(void)
+{
+	int ret = 0;
+
+	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_PIPE_READY &&
+		rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_START &&
+		rmnet_ll_ipa3_ctx->pipe_state == IPA_RMNET_LL_PIPE_NOT_READY) {
+		IPAERR("rmnet_ll in bad state %d\n",
+			rmnet_ll_ipa3_ctx->state);
+		return -EFAULT;
+	}
+	if (rmnet_ll_ipa3_ctx->pipe_state == IPA_RMNET_LL_PIPE_READY ||
+		rmnet_ll_ipa3_ctx->state == IPA_RMNET_LL_START) {
+		if (rmnet_ll_ipa3_ctx->cb_info.stop_cb) {
+			(*(rmnet_ll_ipa3_ctx->cb_info.stop_cb))
+				(rmnet_ll_ipa3_ctx->cb_info.stop_cb_user_data);
+		} else {
+			IPAERR("Invalid stop_cb\n");
+			return -EFAULT;
+		}
+		if (rmnet_ll_ipa3_ctx->state == IPA_RMNET_LL_PIPE_READY)
+			rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_NOT_REG;
+		else
+			rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_REGD;
+	}
+	if (rmnet_ll_ipa3_ctx->pipe_state & IPA_RMNET_LL_PIPE_RX_READY) {
+		ret = ipa3_teardown_sys_pipe(
+			rmnet_ll_ipa3_ctx->ipa3_to_apps_low_lat_data_hdl);
+		if (ret < 0) {
+			IPAERR("Failed to teardown IPA->APPS low lat data pipe\n");
+			return ret;
+		}
+		rmnet_ll_ipa3_ctx->ipa3_to_apps_low_lat_data_hdl = -1;
+		rmnet_ll_ipa3_ctx->pipe_state &= ~IPA_RMNET_LL_PIPE_RX_READY;
+	}
+
+	if (rmnet_ll_ipa3_ctx->pipe_state & IPA_RMNET_LL_PIPE_TX_READY) {
+		ret = ipa3_teardown_sys_pipe(
+			rmnet_ll_ipa3_ctx->apps_to_ipa3_low_lat_data_hdl);
+		if (ret < 0) {
+			IPAERR("Failed to teardown APPS->IPA low lat data pipe\n");
+			return ret;
+		}
+		rmnet_ll_ipa3_ctx->apps_to_ipa3_low_lat_data_hdl = -1;
+		rmnet_ll_ipa3_ctx->pipe_state &= ~IPA_RMNET_LL_PIPE_TX_READY;
+	}
+	return ret;
+}
+
+int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
+{
+	int ret;
+	int len;
+	unsigned long flags;
+
+	if (!ipa3_ctx->rmnet_ll_enable) {
+		IPAERR("low lat data pipe not supported\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
+	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+	/* we cannot infinitely queue the packet */
+	if ((atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)
+		>= RMNET_LL_QUEUE_MAX)) {
+		IPAERR_RL("IPA LL TX queue full\n");
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+			skb->len;
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		kfree_skb(skb);
+		return -EAGAIN;
+	}
+
+	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_START) {
+		IPAERR("bad rmnet_ll state %d\n",
+			rmnet_ll_ipa3_ctx->state);
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+			skb->len;
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		kfree_skb(skb);
+		return 0;
+	}
+
+	/* if queue is not empty, means we still have pending wq */
+	if (skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue) != 0) {
+		skb_queue_tail(&rmnet_ll_ipa3_ctx->tx_queue, skb);
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		return 0;
+	}
+
+	/* rmnet_ll is calling from atomic context */
+	ret = ipa_pm_activate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	if (ret == -EINPROGRESS) {
+		skb_queue_tail(&rmnet_ll_ipa3_ctx->tx_queue, skb);
+		/*
+		 * delayed work is required here since we need to
+		 * reschedule in the same workqueue context on error
+		 */
+		queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
+			&rmnet_ll_wakeup_work, 0);
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		return 0;
+	} else if (ret) {
+		IPAERR("[%s] fatal: ipa pm activate failed %d\n",
+			__func__, ret);
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+			skb->len;
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		kfree_skb(skb);
+		return 0;
+	}
+	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+
+	len = skb->len;
+	/*
+	 * both data packets and commands will be routed to
+	 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
+	 */
+	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
+	if (ret) {
+		if (ret == -EPIPE) {
+			IPAERR("Low lat data fatal: pipe is not valid\n");
+			spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock,
+				flags);
+			rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+			rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+				skb->len;
+			spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+				flags);
+			kfree_skb(skb);
+			return 0;
+		}
+		spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+		skb_queue_head(&rmnet_ll_ipa3_ctx->tx_queue, skb);
+		queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
+			&rmnet_ll_wakeup_work, 0);
+		ret = 0;
+		goto out;
+	}
+
+	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+	atomic_inc(&rmnet_ll_ipa3_ctx->stats.outstanding_pkts);
+	rmnet_ll_ipa3_ctx->stats.tx_pkt_sent++;
+	rmnet_ll_ipa3_ctx->stats.tx_byte_sent += len;
+	ret = 0;
+
+out:
+	if (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)
+		== 0)
+		ipa_pm_deferred_deactivate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+	return ret;
+}
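One detail worth calling out for callers: ipa3_rmnet_ll_xmit() consumes the skb on every path, including the -EAGAIN "queue full" case, where the packet is freed and counted in tx_pkt_dropped. A minimal, hypothetical caller therefore looks like this (my_ll_send is illustrative, not part of this patch):

/* Hypothetical caller sketch: the skb belongs to IPA after this call
 * and must never be resubmitted.
 */
static int my_ll_send(struct sk_buff *skb)
{
	int ret;

	ret = ipa3_rmnet_ll_xmit(skb);
	if (ret == -EAGAIN) {
		/* Queue was full: the skb has already been freed and
		 * counted as dropped; just report the backpressure.
		 */
		return ret;
	}

	/* 0 means the packet was handed to the HW or parked on the
	 * internal tx_queue for the wakeup worker.
	 */
	return 0;
}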
+
+static void rmnet_ll_wakeup_ipa(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct sk_buff *skb;
+	int len = 0;
+
+	/* calling from WQ */
+	ret = ipa_pm_activate_sync(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	if (ret) {
+		IPAERR("[%s] fatal: ipa pm activate failed %d\n",
+			__func__, ret);
+		queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
+			&rmnet_ll_wakeup_work,
+			msecs_to_jiffies(1));
+		return;
+	}
+
+	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+	/* dequeue the skb */
+	while (skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue) > 0) {
+		skb = skb_dequeue(&rmnet_ll_ipa3_ctx->tx_queue);
+		if (skb == NULL)
+			continue;
+		len = skb->len;
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+		/*
+		 * both data packets and commands will be routed to
+		 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
+		 */
+		ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
+		if (ret) {
+			if (ret == -EPIPE) {
+				/* try to drain skb from queue if pipe teardown */
+				IPAERR_RL("Low lat data fatal: pipe is not valid\n");
+				spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock,
+					flags);
+				rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+				rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+					skb->len;
+				kfree_skb(skb);
+				continue;
+			}
+			spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+			skb_queue_head(&rmnet_ll_ipa3_ctx->tx_queue, skb);
+			spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+			goto delayed_work;
+		}
+
+		atomic_inc(&rmnet_ll_ipa3_ctx->stats.outstanding_pkts);
+		spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_sent++;
+		rmnet_ll_ipa3_ctx->stats.tx_byte_sent += len;
+	}
+	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
+	goto out;
+
+delayed_work:
+	queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
+		&rmnet_ll_wakeup_work,
+		msecs_to_jiffies(1));
+out:
+	if (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)
+		== 0) {
+		ipa_pm_deferred_deactivate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	}
+
+}
+
+/**
+ * apps_rmnet_ll_tx_complete_notify() - Tx complete notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context in the IPA wq.
+ */
+static void apps_rmnet_ll_tx_complete_notify(void *priv,
+	enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	unsigned long flags;
+
+	if (evt != IPA_WRITE_DONE) {
+		IPAERR("unsupported event on Tx callback, dropping packet\n");
+		spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
+		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
+			skb->len;
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	atomic_dec(&rmnet_ll_ipa3_ctx->stats.outstanding_pkts);
+
+	if (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts) == 0)
+		ipa_pm_deferred_deactivate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+
+	dev_kfree_skb_any(skb);
+}
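Note the power-management symmetry here: ipa3_rmnet_ll_xmit() takes the "rmnet_ll" PM vote via ipa_pm_activate() before handing packets to the hardware, and this completion handler releases it with ipa_pm_deferred_deactivate() once outstanding_pkts drops back to zero, so the vote is held exactly while packets are in flight.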
+
+/**
+ * apps_rmnet_ll_receive_notify() - Rmnet_ll RX notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data
+ */
+static void apps_rmnet_ll_receive_notify(void *priv,
+	enum ipa_dp_evt_type evt, unsigned long data)
+{
+	void *rx_notify_cb_rx_data;
+	struct sk_buff *low_lat_data;
+	int len;
+
+	low_lat_data = (struct sk_buff *)data;
+	if (low_lat_data == NULL) {
+		IPAERR("Rx packet is invalid\n");
+		return;
+	}
+	len = low_lat_data->len;
+	if (evt == IPA_RECEIVE) {
+		IPADBG_LOW("Rx packet was received\n");
+		rx_notify_cb_rx_data = (void *)data;
+		if (rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb) {
+			(*(rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb))(
+			rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb_user_data,
+			rx_notify_cb_rx_data);
+		} else {
+			goto fail;
+		}
+		rmnet_ll_ipa3_ctx->stats.rx_pkt_rcvd++;
+		rmnet_ll_ipa3_ctx->stats.rx_byte_rcvd +=
+			len;
+	} else {
+		IPAERR("Invalid evt %d received in rmnet_ll\n", evt);
+		goto fail;
+	}
+	return;
+
+fail:
+	kfree_skb(low_lat_data);
+	rmnet_ll_ipa3_ctx->stats.rx_pkt_dropped++;
+}
+
+
+static int ipa3_rmnet_ll_register_pm_client(void)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = "rmnet_ll";
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+
+	IPADBG("%s register done\n", pm_reg.name);
+
+	return 0;
+}
+
+static void ipa3_rmnet_ll_deregister_pm_client(void)
+{
+	ipa_pm_deactivate_sync(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+	ipa_pm_deregister(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
+}