Browse Source

msm: ipa: Support hardware accelerated DIAG over qdss

Add changes to accelerate QDSS diag traffic over IPA
to the PCIe host.

Change-Id: Ice72f8761d092677d4c5434d87bbed295ac435d6
Signed-off-by: Michael Adisumarta <[email protected]>
Michael Adisumarta 4 years ago
parent
commit
ae988186cc

+ 1 - 0
drivers/platform/msm/gsi/gsi.c

@@ -2392,6 +2392,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
 	case GSI_CHAN_PROT_AQC:
 	case GSI_CHAN_PROT_11AD:
 	case GSI_CHAN_PROT_RTK:
+	case GSI_CHAN_PROT_QDSS:
 		ch_k_cntxt_0.chtype_protocol_msb = 1;
 		break;
 	default:

+ 27 - 0
drivers/platform/msm/gsi/gsi.h

@@ -919,6 +919,32 @@ struct __packed gsi_wdi3_channel_scratch {
 	uint32_t reserved2 : 16;
 };
 
+/**
+ * gsi_qdss_channel_scratch - QDSS protocol SW config area of
+ * channel scratch
+ *
+ * @bam_p_evt_dest_addr: equivalent to event_ring_doorbell_pa;
+ *			physical address of the doorbell that IPA uC
+ *			will update with the head pointer of the event
+ *			ring. QDSS should pass the BAM_P_EVNT_REG address
+ *			in this field. Configured with the GSI doorbell
+ *			address; GSI sends an Update RP by writing to
+ *			this address.
+ * @data_fifo_base_addr: base address of the data FIFO used by BAM
+ * @data_fifo_size: size of the data FIFO (16-bit field)
+ * @bam_p_evt_threshold: threshold level of how many bytes consumed
+ * @reserved1: reserved, must be set to zero
+ * @override_eot: when 1, the EOT bit in the descriptor is not checked
+ * @reserved2: reserved, must be set to zero
+ */
+struct __packed gsi_qdss_channel_scratch {
+	uint32_t bam_p_evt_dest_addr;
+	uint32_t data_fifo_base_addr;
+	uint32_t data_fifo_size : 16;
+	uint32_t bam_p_evt_threshold : 16;
+	uint32_t reserved1 : 2;
+	uint32_t override_eot : 1;
+	uint32_t reserved2 : 29;
+};
+
 /**
  * gsi_wdi3_channel_scratch2 - WDI3 protocol SW config area of
  * channel scratch2
@@ -1015,6 +1041,7 @@ union __packed gsi_channel_scratch {
 	struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
 	struct __packed gsi_aqc_channel_scratch aqc;
 	struct __packed gsi_rtk_channel_scratch rtk;
+	struct __packed gsi_qdss_channel_scratch qdss;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;

+ 1 - 0
drivers/platform/msm/ipa/Makefile

@@ -25,6 +25,7 @@ ipam-y += \
 	ipa_v3/ipa_wdi3_i.o \
 	ipa_v3/ipa_odl.o \
 	ipa_v3/ipa_wigig_i.o \
+	ipa_v3/ipa_qdss.o \
 	ipa_v3/ipa_uc_holb_monitor.o \
 	ipa_v3/ipahal/ipahal.o \
 	ipa_v3/ipahal/ipahal_reg.o \

+ 2 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_clients_manager.c

@@ -30,6 +30,8 @@ static int __init ipa_clients_manager_init(void)
 
 	ipa3_notify_clients_registered();
 
+	ipa3_qdss_register();
+
 	return 0;
 }
 subsys_initcall(ipa_clients_manager_init);

+ 1 - 1
drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c

@@ -59,7 +59,7 @@
 #define IPA_MHI_SUSPEND_SLEEP_MAX 1100
 
 #define IPA_MHI_MAX_UL_CHANNELS 2
-#define IPA_MHI_MAX_DL_CHANNELS 3
+#define IPA_MHI_MAX_DL_CHANNELS 4
 
 /* bit #40 in address should be asserted for MHI transfers over pcie */
 #define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \

+ 2 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -64,7 +64,8 @@ int ipa3_enable_data_path(u32 clnt_hdl)
 		 * on other end from IPA hw.
 		 */
 		if ((ep->client == IPA_CLIENT_USB_DPL_CONS) ||
-				(ep->client == IPA_CLIENT_MHI_DPL_CONS))
+				(ep->client == IPA_CLIENT_MHI_DPL_CONS) ||
+				(ep->client == IPA_CLIENT_MHI_QDSS_CONS))
 			holb_cfg.en = IPA_HOLB_TMR_EN;
 		else
 			holb_cfg.en = IPA_HOLB_TMR_DIS;

+ 10 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -65,6 +65,8 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_EN 0x1
 #define IPA_HOLB_TMR_VAL_4_5 31
 /*
  * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
@@ -2549,6 +2551,8 @@ void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val,
 
 int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
 
+int ipa3_force_cfg_ep_holb(u32 clnt_hdl, struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
 int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
 		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
 
@@ -2856,6 +2860,12 @@ void ipa3_eth_debugfs_init(void);
 void ipa3_eth_debugfs_add(struct ipa_eth_client *client);
 
 void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+void ipa3_qdss_register(void);
+int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
+	struct ipa_qdss_conn_out_params *out);
+int ipa3_disconn_qdss_pipes(void);
+
 #ifdef IPA_DEBUG
 #define IPA_DUMP_BUFF(base, phy_base, size) \
 	ipa3_dump_buff_internal(base, phy_base, size)

+ 272 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_qdss.c

@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_qdss.h>
+#include <linux/msm_ipa.h>
+#include <linux/string.h>
+#include <linux/ipa_qdss.h>
+#include "ipa_i.h"
+
+#define IPA_HOLB_TMR_VALUE 0
+#define OFFLOAD_DRV_NAME "ipa_qdss"
+#define IPA_QDSS_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_QDSS_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+/*
+ * GSI channel error callback for the QDSS RX channel: log the error
+ * event and assert, since none of these errors are recoverable here.
+ * Uses the file-local IPA_QDSS_ERR macro so the error also reaches
+ * the IPC log buffers.
+ */
+static void ipa3_qdss_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPA_QDSS_ERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPA_QDSS_ERR("Unexpected err evt: %d\n", notify->evt_id);
+		break;
+	}
+	ipa_assert();
+}
+
+/**
+ * ipa3_conn_qdss_pipes() - connect the QDSS->IPA->MHI offload path
+ * @in: [in] QDSS BAM/FIFO parameters supplied by the QDSS driver
+ * @out: [out] on success, holds the GSI doorbell address of the RX pipe
+ *
+ * Allocates and starts the QDSS RX GSI channel, programs the QDSS
+ * channel scratch with the BAM parameters, enables HOLB on the
+ * MHI_QDSS consumer pipe and places the RX pipe in DMA mode towards
+ * IPA_CLIENT_MHI_QDSS_CONS.
+ *
+ * Returns: IPA_QDSS_SUCCESS on success (or if the pipe is already
+ * connected), -IPA_QDSS_PIPE_CONN_FAILURE otherwise.
+ */
+int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
+	struct ipa_qdss_conn_out_params *out)
+{
+	struct gsi_chan_props gsi_channel_props;
+	struct ipa3_ep_context *ep_rx;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	union __packed gsi_channel_scratch ch_scratch;
+	u32 gsi_db_addr_low, gsi_db_addr_high;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+	int ipa_ep_idx_rx, ipa_ep_idx_tx;
+	int result = 0;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (!(in && out)) {
+		IPA_QDSS_ERR("Empty parameters. in=%pK out=%pK\n", in, out);
+		return -IPA_QDSS_PIPE_CONN_FAILURE;
+	}
+
+	/* QDSS offload is only meaningful in an MHI (PCIe host) config */
+	ipa_ep_idx_tx = ipa3_get_ep_mapping(IPA_CLIENT_MHI_QDSS_CONS);
+	if ((ipa_ep_idx_tx < 0) || (!ipa3_ctx->ipa_config_is_mhi)) {
+		IPA_QDSS_ERR("getting EP map failed\n");
+		return -IPA_QDSS_PIPE_CONN_FAILURE;
+	}
+
+	ipa_ep_idx_rx = ipa3_get_ep_mapping(IPA_CLIENT_QDSS_PROD);
+	if ((ipa_ep_idx_rx < 0) ||
+		(ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES)) {
+		IPA_QDSS_ERR("out of range ipa_ep_idx_rx = %d\n",
+			ipa_ep_idx_rx);
+		return -IPA_QDSS_PIPE_CONN_FAILURE;
+	}
+
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+
+	/* already connected - treat as success, not an error */
+	if (ep_rx->valid) {
+		IPA_QDSS_ERR("EP already allocated.\n");
+		return IPA_QDSS_SUCCESS;
+	}
+
+	memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ep_rx->valid = 1;
+	ep_rx->client = IPA_CLIENT_QDSS_PROD;
+	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
+		IPA_QDSS_ERR("fail to setup rx pipe cfg\n");
+		goto fail;
+	}
+
+	/* setup channel ring */
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_QDSS;
+	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ep_rx->client);
+	if (!gsi_ep_info) {
+		IPA_QDSS_ERR("Failed getting GSI EP info for client=%d\n",
+			ep_rx->client);
+		goto fail;
+	}
+
+	gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_8B;
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.err_cb = ipa3_qdss_gsi_chan_err_cb;
+	gsi_channel_props.ring_len = in->desc_fifo_size;
+	gsi_channel_props.ring_base_addr =
+			in->desc_fifo_base_addr;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+				&ep_rx->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPA_QDSS_ERR("Failed allocating gsi channel, result=%d\n",
+				result);
+		goto fail;
+	}
+
+	ep_rx->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep_rx->gsi_mem_info.chan_ring_base_addr =
+				gsi_channel_props.ring_base_addr;
+
+	/* program QDSS BAM parameters into the channel scratch area */
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.qdss.bam_p_evt_dest_addr = in->bam_p_evt_dest_addr;
+	ch_scratch.qdss.data_fifo_base_addr = in->data_fifo_base_addr;
+	ch_scratch.qdss.data_fifo_size = in->data_fifo_size;
+	ch_scratch.qdss.bam_p_evt_threshold = in->bam_p_evt_threshold;
+	ch_scratch.qdss.override_eot = in->override_eot;
+	result = gsi_write_channel_scratch(
+				ep_rx->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPA_QDSS_ERR("failed to write channel scratch\n");
+		goto fail_write_scratch;
+	}
+
+	/* query channel db address */
+	if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPA_QDSS_ERR("failed to query gsi rx db addr\n");
+		goto fail_write_scratch;
+	}
+	out->ipa_rx_db_pa = (phys_addr_t)(gsi_db_addr_low);
+	IPA_QDSS_DBG("QDSS out->ipa_rx_db_pa %llu\n",
+		(unsigned long long)out->ipa_rx_db_pa);
+
+	/* Configuring HOLB on MHI endpoint; failure is non-fatal */
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.en = IPA_HOLB_TMR_EN;
+	holb_cfg.tmr_val = IPA_HOLB_TMR_VALUE;
+	result = ipa3_force_cfg_ep_holb(ipa_ep_idx_tx, &holb_cfg);
+	if (result)
+		IPA_QDSS_ERR("Configuring HOLB failed client_type =%d\n",
+			IPA_CLIENT_MHI_QDSS_CONS);
+
+	/* Set DMA */
+	IPA_QDSS_DBG("DMA from %d to %d", IPA_CLIENT_QDSS_PROD,
+		IPA_CLIENT_MHI_QDSS_CONS);
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_MHI_QDSS_CONS;
+	ep_cfg.seq.set_dynamic = true;
+	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_cfg)) {
+		IPA_QDSS_ERR("Setting DMA mode failed\n");
+		goto fail_write_scratch;
+	}
+
+	/* Start QDSS_rx gsi channel */
+	result = ipa3_start_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPA_QDSS_ERR("Failed starting QDSS gsi channel\n");
+		goto fail_write_scratch;
+	}
+
+	IPA_QDSS_DBG("QDSS connect pipe success");
+
+	return IPA_QDSS_SUCCESS;
+
+fail_write_scratch:
+	gsi_dealloc_channel(ep_rx->gsi_chan_hdl);
+fail:
+	/* fully clear the EP (including ep_rx->valid) so a retry can run */
+	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return -IPA_QDSS_PIPE_CONN_FAILURE;
+}
+
+/**
+ * ipa3_disconn_qdss_pipes() - tear down the QDSS offload path
+ *
+ * Stops and resets the QDSS RX GSI channel, restores the RX pipe to
+ * basic (non-DMA) mode, then releases the GSI channel and clears the
+ * EP context. The DMA-mode reset is best effort: its failure is
+ * logged but does not abort the teardown.
+ *
+ * Returns: IPA_QDSS_SUCCESS on success,
+ * -IPA_QDSS_PIPE_DISCONN_FAILURE otherwise.
+ */
+int ipa3_disconn_qdss_pipes(void)
+{
+	int result = 0;
+	int ipa_ep_idx_rx;
+	struct ipa3_ep_context *ep_rx;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+
+	ipa_ep_idx_rx = ipa3_get_ep_mapping(IPA_CLIENT_QDSS_PROD);
+	if (ipa_ep_idx_rx == -1) {
+		IPA_QDSS_ERR("fail to get ep mapping\n");
+		return -IPA_QDSS_PIPE_DISCONN_FAILURE;
+	}
+
+	if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
+		IPA_QDSS_ERR("ep out of range.\n");
+		return -IPA_QDSS_PIPE_DISCONN_FAILURE;
+	}
+
+	/* Stop QDSS_rx gsi channel / release channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPA_QDSS_ERR("Failed stopping QDSS gsi channel\n");
+		goto fail;
+	}
+
+	/* Resetting gsi channel */
+	result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPA_QDSS_ERR("Failed resetting QDSS gsi channel\n");
+		goto fail;
+	}
+
+	/* Reset DMA mode - informational, not an error */
+	IPA_QDSS_DBG("Resetting DMA %d to %d",
+		IPA_CLIENT_QDSS_PROD, IPA_CLIENT_MHI_QDSS_CONS);
+	ep_cfg.mode.mode = IPA_BASIC;
+	ep_cfg.mode.dst = IPA_CLIENT_MHI_QDSS_CONS;
+	ep_cfg.seq.set_dynamic = true;
+	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_cfg))
+		IPA_QDSS_ERR("Resetting DMA mode failed\n");
+
+	/* Deallocating and Clearing ep config */
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+	gsi_dealloc_channel(ep_rx->gsi_chan_hdl);
+	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPA_QDSS_DBG("QDSS disconnect pipe success");
+
+	return IPA_QDSS_SUCCESS;
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return -IPA_QDSS_PIPE_DISCONN_FAILURE;
+}
+
+/*
+ * Register the QDSS connect/disconnect entry points with the IPA
+ * framework so clients can reach them through the public ipa_qdss API.
+ */
+void ipa3_qdss_register(void)
+{
+	/* zero-init: any member we do not set must be NULL, not stack junk */
+	struct ipa_qdss_data funcs = { 0 };
+
+	funcs.ipa_qdss_conn_pipes = ipa3_conn_qdss_pipes;
+	funcs.ipa_qdss_disconn_pipes = ipa3_disconn_qdss_pipes;
+
+	if (ipa_fmwk_register_ipa_qdss(&funcs))
+		pr_err("failed to register ipa_qdss APIs\n");
+}
+EXPORT_SYMBOL(ipa3_qdss_register);

+ 91 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -2567,6 +2567,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 3, 5, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 }, IPA_TX_INSTANCE_NA  },
+	[IPA_4_5_MHI][IPA_CLIENT_QDSS_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_QDSS,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 14, 10, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Only for test purpose */
 	[IPA_4_5_MHI][IPA_CLIENT_TEST_PROD]           = {
 			true, QMB_MASTER_SELECT_DDR,
@@ -2653,6 +2659,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 30, 6, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 }, IPA_TX_INSTANCE_NA },
+	[IPA_4_5_MHI][IPA_CLIENT_MHI_QDSS_CONS] = {
+			true, IPA_v4_5_MHI_GROUP_QDSS,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 24, 3, 8, 14, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
 			true, QMB_MASTER_SELECT_DDR,
@@ -4664,21 +4676,22 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_WIGIG4_CONS),
 	__stringify(RESERVERD_PROD_94),
 	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
-	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
-	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
+        __stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
+        __stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
 	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
 	__stringify(RESERVERD_CONS_101),
 	__stringify(IPA_CLIENT_AQC_ETHERNET_PROD),
 	__stringify(IPA_CLIENT_AQC_ETHERNET_CONS),
 	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD),
 	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS),
-	__stringify(IPA_CLIENT_QDSS_PROD),
-	__stringify(IPA_CLIENT_MHI_QDSS_CONS),
+        __stringify(IPA_CLIENT_QDSS_PROD),
+        __stringify(IPA_CLIENT_MHI_QDSS_CONS),
+	__stringify(IPA_CLIENT_RTK_ETHERNET_PROD),
+	__stringify(IPA_CLIENT_RTK_ETHERNET_CONS),
 	__stringify(IPA_CLIENT_MHI_LOW_LAT_PROD),
 	__stringify(IPA_CLIENT_MHI_LOW_LAT_CONS),
-	__stringify(RESERVERD_CONS_103),
 	__stringify(IPA_CLIENT_MHI2_PROD),
 	__stringify(IPA_CLIENT_MHI2_CONS),
 	__stringify(IPA_CLIENT_Q6_CV2X_PROD),
@@ -4871,6 +4884,7 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
 	    client == IPA_CLIENT_USB_DPL_CONS ||
 	    client == IPA_CLIENT_MHI_CONS     ||
 	    client == IPA_CLIENT_MHI_DPL_CONS ||
+	    client == IPA_CLIENT_MHI_QDSS_CONS ||
 	    client == IPA_CLIENT_HSIC1_CONS   ||
 	    client == IPA_CLIENT_WLAN1_CONS   ||
 	    client == IPA_CLIENT_WLAN2_CONS   ||
@@ -6891,6 +6905,78 @@ success:
 }
 EXPORT_SYMBOL(ipa3_cfg_ep_holb);
 
+/**
+ * ipa3_force_cfg_ep_holb() - IPA end-point holb configuration
+ *			for QDSS_MHI_CONS pipe
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_holb:	[in] IPA end-point HOLB params; en/pulse_generator/
+ *		scaled_time may be rewritten as part of the sequence
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_force_cfg_ep_holb(u32 clnt_hdl,
+	struct ipa_ep_cfg_holb *ep_holb)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ep_holb == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	if (ep_holb->en == IPA_HOLB_TMR_DIS) {
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			clnt_hdl, ep_holb);
+		goto success;
+	}
+
+	/* Follow HPG sequence to DIS_HOLB, Configure Timer, and HOLB_EN */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		int res;
+
+		ep_holb->en = IPA_HOLB_TMR_DIS;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			clnt_hdl, ep_holb);
+
+		res = ipa3_process_timer_cfg(ep_holb->tmr_val * 1000,
+			&ep_holb->pulse_generator,
+			&ep_holb->scaled_time);
+		if (res) {
+			IPAERR("failed to process HOLB timer tmr=%u\n",
+				ep_holb->tmr_val);
+			ipa_assert();
+			/* drop the active-clients vote taken above */
+			IPA_ACTIVE_CLIENTS_DEC_EP(
+				ipa3_get_client_mapping(clnt_hdl));
+			return res;
+		}
+	}
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+		clnt_hdl, ep_holb);
+
+	/* Enable HOLB */
+	ep_holb->en = IPA_HOLB_TMR_EN;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+		clnt_hdl, ep_holb);
+	/* IPA4.5 issue requires HOLB_EN to be written twice */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+			clnt_hdl, ep_holb);
+
+success:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+		ep_holb->tmr_val);
+	return 0;
+}
+
 /**
  * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
  *