
dataipa: adding xr ipa-rtp depacketization interface commands

1) changes to establish xr ipa-rtp datapath and control path
   cmds between IPA AP and IPA uC, with input from IPA C2.
2) extend ipa-reg-save support to save all uC channel info used here.

Change-Id: I9b92ea76ad2d56e9d68aa9db043ce64bb4cca93a
Signed-off-by: Prasad Arepalli <[email protected]>
Prasad Arepalli · 1 year ago
commit 9ccb596820

+ 1 - 0
define_modules.bzl

@@ -211,6 +211,7 @@ def define_modules(target, variant):
                 True: [
                     "drivers/platform/msm/ipa/ipa_v3/ipa_rtp_genl.h",
                     "drivers/platform/msm/ipa/ipa_v3/ipa_rtp_genl.c",
+                    "drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c",
                 ],
             },
         },

+ 14 - 2
drivers/platform/msm/include/uapi/linux/msm_ipa.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _UAPI_MSM_IPA_H_
@@ -523,9 +523,21 @@ enum ipa_client_type {
 
 	IPA_CLIENT_Q6_DL_NLO_DATA_XLAT_PROD     = 132,
 	IPA_CLIENT_IPSEC_ENCAP_ERR_CONS		= 133,
+
+	/* RESERVED PROD			= 134, */
+	IPA_CLIENT_UC_RTP1_CONS = 135,
+
+	/* RESERVED PROD			= 136, */
+	IPA_CLIENT_UC_RTP2_CONS = 137,
+
+	/* RESERVED PROD			= 138, */
+	IPA_CLIENT_UC_RTP3_CONS = 139,
+
+	/* RESERVED PROD			= 140, */
+	IPA_CLIENT_UC_RTP4_CONS = 141,
 };
 
-#define IPA_CLIENT_MAX (IPA_CLIENT_IPSEC_ENCAP_ERR_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_UC_RTP4_CONS + 1)
 
 #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
 #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
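The reserved producer slots preserve the header's parity convention: producer clients sit on even enum values and consumer clients on odd ones, which is what the IPA_CLIENT_IS_CONS()/IPA_CLIENT_IS_PROD() checks in this header rely on. A minimal sketch of that convention (the macro bodies shown are assumptions, not copied from this diff):

/* sketch of the assumed parity checks */
#define IPA_CLIENT_IS_CONS(x) ((x) & 0x1)    /* odd value  => consumer */
#define IPA_CLIENT_IS_PROD(x) (!((x) & 0x1)) /* even value => producer */

/*
 * IPA_CLIENT_UC_RTP1_CONS == 135 is odd, so it is a consumer;
 * the even slot 134 stays reserved for a future paired producer.
 */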

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.5/ipa_hw_common_ex.h

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #if !defined(_IPA_HW_COMMON_EX_H_)
 #define _IPA_HW_COMMON_EX_H_
@@ -444,12 +444,12 @@ enum ipa_hw_irq_srcs_e {
 /*
  * Total number of channel contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          25
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          30
 
 /*
  * Total number of channel contexts that need to be saved for UC
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC          2
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC          7
 
  /*
  * Total number of channel contexts that need to be saved for Q6
@@ -459,12 +459,12 @@ enum ipa_hw_irq_srcs_e {
 /*
  * Total number of event ring contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         25
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         30
 
 /*
  * Total number of event ring contexts that need to be saved for UC
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         2
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         7
 
 /*
  * Total number of event ring contexts that need to be saved for Q6
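The counts above size the per-EE arrays in the register-save area, so raising them to 30 (APPS) and 7 (uC) makes room for the extra RTP channels. Note that the new dump-table entries below index up to a7[30] and uc[7], so in practice the arrays must hold one more slot than the raw counts suggest. A hedged sketch of how the counts are consumed (struct and field names are assumptions modeled on the dump macros):

/* assumed shape of the save area sized by the counts above */
struct ipa_reg_save_gsi_ch_cntxt_s {
	struct ch_cntxt_s a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
	struct ch_cntxt_s uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
	struct ch_cntxt_s q6[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6];
};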

+ 97 - 25
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.5/ipa_reg_dump.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #if !defined(_IPA_REG_DUMP_H_)
 #define _IPA_REG_DUMP_H_
@@ -458,43 +458,79 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[24].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 0),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 25), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[25].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 26), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[26].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 27), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[27].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 28), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[28].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 29), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[29].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 30), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[30].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 0), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 3), \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[2].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[3].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[4].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[5].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[6].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[7].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[1].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[2].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[3].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[4].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[5].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[6].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[7].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[8].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[9].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[10].var_name, \
 		GEN_REG_ATTR(reg_name) }
 
@@ -634,40 +670,76 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[24].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 25), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[25].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 26), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[26].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 27), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[27].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 28), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[28].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 29), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[29].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 30), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[30].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 0), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[1].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[2].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[3].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[4].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[5].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[6].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[7].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[1].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[2].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[3].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[4].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[5].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[6].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[7].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[8].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[9].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[10].var_name, \
 		GEN_REG_ATTR(reg_name) }
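Each macro expansion above contributes one row pairing a GSI register offset (per EE and ring index) with the address of the matching field in the static ipa_reg_save area. A hedged sketch of how such a map is typically walked at dump time (the function and base pointer are illustrative, not part of this patch):

/* illustrative consumer of the offset/destination map — names are assumptions */
extern void __iomem *gsi_reg_base;	/* assumed ioremapped GSI base */

static void ipa_reg_save_walk(const struct map_src_dst_addr_s *map, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		*(map[i].dst_addr) = ioread32(gsi_reg_base + map[i].src_addr);
}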
 

+ 67 - 0
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -8270,6 +8270,37 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	if(!ipa_tlpd_stats_init())
 		IPADBG("Fail to init tlpd ipa lnx module");
 
+#ifdef CONFIG_IPA_RTP
+	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_XR) {
+		/* uC is getting loaded through XBL here */
+		ipa3_ctx->uc_ctx.uc_inited = true;
+		ipa3_ctx->uc_ctx.uc_loaded = true;
+		result = ipa3_alloc_temp_buffs_to_uc(TEMP_BUFF_SIZE, NO_OF_BUFFS);
+		if (result) {
+			IPAERR("Temp buffer allocations for uC failed %d\n", result);
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+
+		result = ipa3_allocate_uc_pipes_er_tr_send_to_uc();
+		if (result) {
+			IPAERR("ER and TR allocations for uC pipes failed %d\n", result);
+			ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+
+		result = ipa3_create_hfi_send_uc();
+		if (result) {
+			IPAERR("HFI Creation failed %d\n", result);
+			ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
+			ipa3_free_uc_pipes_er_tr();
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+	}
+#endif
+
 	pr_info("IPA driver initialization was successful.\n");
 #if IS_ENABLED(CONFIG_QCOM_VA_MINIDUMP)
 	/*Adding ipa3_ctx pointer to minidump list*/
@@ -11093,6 +11124,15 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 	int bypass = 0;
 	int fast = 0;
 	u32 iova_ap_mapping[2];
+	u32 iova = 0;
+	u32 pa = 0;
+	u32 size = 0;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	u32 add_map_size;
+	const u32 *add_map;
+	int i = 0;
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
 	int mapping_config;
 #endif
@@ -11177,6 +11217,33 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);
 
 	ipa3_ctx->uc_pdev = dev;
+
+	add_map = of_get_property(dev->of_node,
+		"qcom,ipcc-mapping", &add_map_size);
+	if (add_map) {
+		/* the mapping is an array of u32 3-tuples */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong ipcc mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the ipcc mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			iova = be32_to_cpu(add_map[i]);
+			pa = be32_to_cpu(add_map[i + 1]);
+			size = be32_to_cpu(add_map[i + 2]);
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu_domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		}
+	}
+
 	cb->done = true;
 	return 0;
 }
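The probe addition reads qcom,ipcc-mapping as a flat array of <iova pa size> u32 triples and installs each as an MMIO mapping in the uC context bank — presumably the IPCC doorbell registers, given the IOMMU_MMIO attribute. A hypothetical devicetree entry in that shape (the addresses are placeholders, not taken from this change):

qcom,ipcc-mapping = <0x00408000 0x00408000 0x1000>;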

+ 20 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -46,6 +46,7 @@
 #ifdef CONFIG_IPA_RTP
 #include "ipa_rtp_genl.h"
 #endif
+#include <linux/dma-buf.h>
 
 #define IPA_DEV_NAME_MAX_LEN 15
 #define DRV_NAME "ipa"
@@ -573,6 +574,11 @@ enum {
 
 #define IPA_RULE_CNT_MAX 512
 
+/* XR-IPA uC temp buffers sizes */
+#define TEMP_BUFF_SIZE	0x300000
+/* XR-IPA uC no. of temp buffers */
+#define NO_OF_BUFFS	0x04
+
 /* miscellaneous for rmnet_ipa and qmi_service */
 enum ipa_type_mode {
 	IPA_HW_TYPE,
@@ -3831,4 +3837,18 @@ int ipa3_update_apps_per_stats(enum ipa_per_stats_type_e stats_type, uint32_t da
 int ipa3_update_client_holb_per_stats(enum ipa_per_stats_type_e stats_type, uint32_t data);
 int ipa3_update_dma_per_stats(enum ipa_per_stats_type_e stats_type, uint32_t data);
 
+/* XR-IPA APIs */
+#ifdef CONFIG_IPA_RTP
+int ipa3_uc_send_tuple_info_cmd(struct traffic_tuple_info *data);
+int ipa3_alloc_temp_buffs_to_uc(unsigned int size, unsigned int no_of_buffs);
+int ipa3_map_buff_to_device_addr(struct map_buffer *map_buffs);
+int ipa3_unmap_buff_from_device_addr(struct unmap_buffer *unmap_buffs);
+int ipa3_send_bitstream_buff_info(struct bitstream_buffers *data);
+int ipa3_tuple_info_cmd_to_wlan_uc(struct traffic_tuple_info *req, u32 stream_id);
+int ipa3_uc_send_remove_stream_cmd(struct remove_bitstream_buffers *data);
+int ipa3_create_hfi_send_uc(void);
+int ipa3_allocate_uc_pipes_er_tr_send_to_uc(void);
+void ipa3_free_uc_temp_buffs(unsigned int no_of_buffs);
+void ipa3_free_uc_pipes_er_tr(void);
+#endif
 #endif /* _IPA3_I_H_ */
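Together with the XR branch added to ipa3_post_init() above, the intended bring-up order of these APIs is: temporary buffers first, then ER/TR allocation, then HFI creation, unwinding in reverse on failure. A condensed sketch of that sequence, mirroring the ipa.c hunk:

/* condensed XR bring-up sketch, mirroring the ipa3_post_init() hunk */
static int ipa_xr_rtp_bringup(void)
{
	int ret;

	ret = ipa3_alloc_temp_buffs_to_uc(TEMP_BUFF_SIZE, NO_OF_BUFFS);
	if (ret)
		return ret;

	ret = ipa3_allocate_uc_pipes_er_tr_send_to_uc();
	if (ret)
		goto free_bufs;

	ret = ipa3_create_hfi_send_uc();
	if (ret)
		goto free_rings;

	return 0;

free_rings:
	ipa3_free_uc_pipes_er_tr();
free_bufs:
	ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
	return ret;
}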

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h

@@ -1,6 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _IPA_UC_OFFLOAD_I_H_
@@ -69,6 +71,7 @@ enum ipa3_hw_features {
 	IPA_HW_FEATURE_ZIP		=	0x4,
 	IPA_HW_FEATURE_NTN		=	0x5,
 	IPA_HW_FEATURE_OFFLOAD		=	0x6,
+	IPA_HW_FEATURE_RTP		=	0x8,
 	IPA_HW_FEATURE_MAX		=	IPA_HW_NUM_FEATURES
 };
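IPA_HW_FEATURE_RTP is the feature ID that ipa_uc_rtp.c folds into every RTP command opcode through FEATURE_ENUM_VAL(). A sketch of the assumed encoding (the shift width is a guess at the existing macro, which is outside this diff):

/* assumed shape of the existing opcode encoding */
#define FEATURE_ENUM_VAL(feature, opcode) (((feature) << 5) | (opcode))

/* under this assumption, IPA_CPU_2_HW_CMD_RTP_TUPLE_INFO == (0x8 << 5) | 0 */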
 

+ 1021 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c

@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/delay.h>
+#include <linux/soc/qcom/msm_hw_fence.h>
+#include <synx_api.h>
+
+/* ER ==> (16B * 512 entries) * 4 frames = 8KB * 4 = 32KB */
+#define IPA_UC_PROD_EVENT_RING_SIZE 512
+/* TR ==> (16B * 512 entries per frame * 6 frames) * 4 prod pipes = 48KB * 4 = 192KB */
+#define IPA_UC_PROD_TRANSFER_RING_SIZE (512 * 3)
+/* TR ==> 1024 * 8B TRE * 2 pipes */
+#define IPA_UC_CON_TRANSFER_RING_SIZE  1024
+
+#define MAX_NUMBER_OF_STREAMS 4
+#define MAX_NUMBER_OF_PARTITIONS MAX_NUMBER_OF_STREAMS
+
+#define MAX_UC_PROD_PIPES 4
+#define MAX_UC_CONS_PIPES 2
+
+#define MAX_UC_PROD_PIPES_TR_INDEX MAX_UC_PROD_PIPES
+#define MAX_UC_PROD_PIPES_ER_INDEX (MAX_UC_PROD_PIPES_TR_INDEX + MAX_UC_PROD_PIPES)
+#define MAX_UC_CONS_PIPES_TR_INDEX (MAX_UC_PROD_PIPES_ER_INDEX + MAX_UC_CONS_PIPES)
+
+#define ER_TR_UC_BUFFS (MAX_UC_PROD_PIPES + MAX_UC_PROD_PIPES + MAX_UC_CONS_PIPES)
+
+#define MAX_SYNX_FENCE_SESSION_NAME  64
+#define DMA_DIR DMA_BIDIRECTIONAL
+
+#define GSI_TRE_RE_XFER 2
+#define TRE_SIZE 2048
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+enum ipa3_cpu_2_hw_rtp_commands {
+	IPA_CPU_2_HW_CMD_RTP_TUPLE_INFO             =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 0),
+	IPA_CPU_2_HW_CMD_RTP_ADD_TEMP_BUFF_INFO     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 1),
+	IPA_CPU_2_HW_CMD_RTP_ADD_BIT_STREAM_BUFF    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 2),
+	IPA_CPU_2_HW_CMD_RTP_GET_HFI_STRUCT         =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 3),
+	IPA_CPU_2_HW_CMD_RTP_START_STREAM           =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 4),
+	IPA_CPU_2_HW_CMD_RTP_STOP_STREAM            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 5),
+	IPA_CPU_2_HW_CMD_RTP_TEAR_DOWN_STREAM       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 6),
+	IPA_CPU_2_HW_CMD_RTP_UPDATE_STREAM_INFO     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 7),
+	IPA_CPU_2_HW_CMD_RTP_SIGNAL_FENCE           =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 8),
+	IPA_CPU_2_HW_CMD_RTP_PIPE_SETUP             =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 10),
+	IPA_CPU_2_HW_CMD_RTP_REMOVE_STREAM          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_RTP, 11),
+};
+
+struct bitstream_buffer_info_to_uc {
+	uint8_t stream_id;
+	uint16_t fence_id;
+	uint8_t reserved;
+	u64 buff_addr;
+	u32 buff_fd;
+	u32 buff_size;
+	u64 meta_buff_addr;
+	u32 meta_buff_fd;
+	u32 meta_buff_size;
+} __packed;
+
+struct bitstream_buffers_to_uc {
+	uint16_t buff_cnt;
+	uint16_t cookie;
+	struct bitstream_buffer_info_to_uc bs_info[MAX_BUFF];
+} __packed;
+
+struct dma_address_map_table {
+	struct dma_buf *dma_buf_list[2];
+	struct dma_buf_attachment *attachment[2];
+	struct sg_table *sgt[2];
+};
+
+/* Bitstream and meta buffer dma addresses list */
+struct list_node {
+	struct list_head list_obj;
+	struct dma_address_map_table *data;
+};
+
+struct prod_pipe_tre {
+	uint64_t buffer_ptr;
+	uint16_t buf_len;
+	uint16_t resvd1;
+	uint16_t chain:1;
+	uint16_t resvd4:7;
+	uint16_t ieob:1;
+	uint16_t ieot:1;
+	uint16_t bei:1;
+	uint16_t resvd3:5;
+	uint8_t re_type;
+	uint8_t resvd2;
+} __packed;
+
+struct con_pipe_tre {
+	uint64_t buffer_ptr;
+	uint16_t buf_len;
+	uint16_t resvd1;
+	uint16_t chain:1;
+	uint16_t resvd4:7;
+	uint16_t ieob:1;
+	uint16_t ieot:1;
+	uint16_t bei:1;
+	uint16_t resvd3:5;
+	uint8_t re_type;
+	uint8_t resvd2;
+} __packed;
+
+struct temp_buff_info {
+	uint64_t temp_buff_pa;
+	uint32_t temp_buff_size;
+} __packed;
+
+struct rtp_pipe_setup_cmd_data {
+	struct temp_buff_info uc_prod_tr[MAX_UC_PROD_PIPES];
+	struct temp_buff_info uc_prod_er[MAX_UC_PROD_PIPES];
+	struct temp_buff_info uc_cons_tr[MAX_UC_CONS_PIPES];
+} __packed;
+
+struct hfi_queue_info {
+	u64 hfi_queue_addr;
+	u32 hfi_queue_size;
+	u64 queue_header_start_addr;
+	u64 queue_payload_start_addr;
+} __packed;
+
+struct temp_buffer_info {
+	uint64_t temp_buff_pa;
+	uint32_t temp_buff_size;
+} __packed;
+
+struct uc_temp_buffer_info {
+	uint16_t number_of_partitions;
+	struct temp_buffer_info buffer_info[MAX_NUMBER_OF_PARTITIONS];
+} __packed;
+
+struct er_tr_to_free {
+	void *cpu_address[ER_TR_UC_BUFFS];
+	struct rtp_pipe_setup_cmd_data rtp_tr_er;
+	uint16_t no_buffs;
+} __packed;
+
+struct er_tr_to_free er_tr_cpu_addresses;
+void *cpu_address[NO_OF_BUFFS];
+struct uc_temp_buffer_info tb_info;
+struct list_head mapped_bs_buff_lst[MAX_NUMBER_OF_STREAMS];
+
+int ipa3_uc_send_tuple_info_cmd(struct traffic_tuple_info *data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct traffic_tuple_info *cmd_data;
+
+	if (!data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct traffic_tuple_info *)cmd.base;
+	cmd_data->ts_info.no_of_openframe = data->ts_info.no_of_openframe;
+	cmd_data->ts_info.max_pkt_frame = data->ts_info.max_pkt_frame;
+	cmd_data->ts_info.stream_type = data->ts_info.stream_type;
+	cmd_data->ts_info.reorder_timeout = data->ts_info.reorder_timeout;
+	cmd_data->ts_info.num_slices_per_frame = data->ts_info.num_slices_per_frame;
+	cmd_data->ip_type = data->ip_type;
+	if (cmd_data->ip_type) {
+		cmd_data->ip_info.ipv6.src_port_number = data->ip_info.ipv6.src_port_number;
+		cmd_data->ip_info.ipv6.dst_port_number = data->ip_info.ipv6.dst_port_number;
+		memcpy(cmd_data->ip_info.ipv6.src_ip, data->ip_info.ipv6.src_ip, 16);
+		memcpy(cmd_data->ip_info.ipv6.dst_ip, data->ip_info.ipv6.dst_ip, 16);
+		cmd_data->ip_info.ipv6.protocol = data->ip_info.ipv6.protocol;
+	} else {
+		cmd_data->ip_info.ipv4.src_port_number = data->ip_info.ipv4.src_port_number;
+		cmd_data->ip_info.ipv4.dst_port_number = data->ip_info.ipv4.dst_port_number;
+		cmd_data->ip_info.ipv4.src_ip = data->ip_info.ipv4.src_ip;
+		cmd_data->ip_info.ipv4.dst_ip = data->ip_info.ipv4.dst_ip;
+		cmd_data->ip_info.ipv4.protocol = data->ip_info.ipv4.protocol;
+	}
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_TUPLE_INFO,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("uc send tuple info cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+int ipa3_tuple_info_cmd_to_wlan_uc(struct traffic_tuple_info *req, u32 stream_id)
+{
+	int result = 0;
+	struct ipa_wdi_opt_dpath_flt_add_cb_params flt_add_req;
+
+	if (!req) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	if (!ipa3_ctx->ipa_xr_wdi_flt_rsv_status) {
+		result = ipa_xr_wdi_opt_dpath_rsrv_filter_req();
+		ipa3_ctx->ipa_xr_wdi_flt_rsv_status = !result;
+		if (result) {
+			IPAERR("filter reservation failed at WLAN %d\n", result);
+			return result;
+		}
+	}
+
+	memset(&flt_add_req, 0, sizeof(struct ipa_wdi_opt_dpath_flt_add_cb_params));
+	flt_add_req.num_tuples = 1;
+	flt_add_req.flt_info[0].version = req->ip_type;
+	if (!flt_add_req.flt_info[0].version) {
+		flt_add_req.flt_info[0].ipv4_addr.ipv4_saddr = req->ip_info.ipv4.src_ip;
+		flt_add_req.flt_info[0].ipv4_addr.ipv4_daddr = req->ip_info.ipv4.dst_ip;
+		flt_add_req.flt_info[0].protocol = req->ip_info.ipv4.protocol;
+		flt_add_req.flt_info[0].sport = req->ip_info.ipv4.src_port_number;
+		flt_add_req.flt_info[0].dport = req->ip_info.ipv4.dst_port_number;
+		IPADBG("IPv4 saddr:0x%x, daddr:0x%x\n",
+			flt_add_req.flt_info[0].ipv4_addr.ipv4_saddr,
+			flt_add_req.flt_info[0].ipv4_addr.ipv4_daddr);
+	} else {
+		memcpy(flt_add_req.flt_info[0].ipv6_addr.ipv6_saddr,
+			req->ip_info.ipv6.src_ip,
+			sizeof(req->ip_info.ipv6.src_ip));
+		memcpy(flt_add_req.flt_info[0].ipv6_addr.ipv6_daddr,
+			req->ip_info.ipv6.dst_ip,
+			sizeof(req->ip_info.ipv6.dst_ip));
+		flt_add_req.flt_info[0].protocol = req->ip_info.ipv6.protocol;
+		flt_add_req.flt_info[0].sport = req->ip_info.ipv6.src_port_number;
+		flt_add_req.flt_info[0].dport = req->ip_info.ipv6.dst_port_number;
+		IPADBG("IPv6 saddr:0x%x:%x:%x:%x, daddr:0x%x:%x:%x:%x\n",
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_saddr[0],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_saddr[1],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_saddr[2],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_saddr[3],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_daddr[0],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_daddr[1],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_daddr[2],
+			flt_add_req.flt_info[0].ipv6_addr.ipv6_daddr[3]);
+	}
+
+	result = ipa_xr_wdi_opt_dpath_add_filter_req(&flt_add_req, stream_id);
+	if (result) {
+		IPAERR("Fail to send tuple info cmd to wlan\n");
+		return -EPERM;
+	}
+
+	result = ipa3_uc_send_tuple_info_cmd(req);
+	if (result)
+		IPAERR("Fail to send tuple info cmd to uc\n");
+	else
+		IPADBG("send tuple info cmd to uc succeeded\n\n");
+
+	return result;
+}
+
+int ipa3_uc_send_remove_stream_cmd(struct remove_bitstream_buffers *data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct remove_bitstream_buffers *cmd_data;
+
+	if (!data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	result = ipa_xr_wdi_opt_dpath_remove_filter_req(data->stream_id);
+	if (result)
+		IPAERR("Failed to remove wlan filter of stream ID %d\n", data->stream_id);
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct remove_bitstream_buffers *)cmd.base;
+	cmd_data->stream_id = data->stream_id;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_REMOVE_STREAM,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("uc send remove stream cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+int ipa3_uc_send_add_bitstream_buffers_cmd(struct bitstream_buffers_to_uc *data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct bitstream_buffers_to_uc *cmd_data = NULL;
+
+	if (!data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct bitstream_buffers_to_uc *)cmd.base;
+	cmd_data->buff_cnt = data->buff_cnt;
+	cmd_data->cookie = data->cookie;
+	memcpy(cmd_data->bs_info, data->bs_info, (cmd_data->buff_cnt *
+		sizeof(struct bitstream_buffer_info_to_uc)));
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_ADD_BIT_STREAM_BUFF,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("uc send bitstream buffers info cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+
+	return result;
+}
+
+int ipa3_uc_send_temp_buffers_info_cmd(struct uc_temp_buffer_info *data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct uc_temp_buffer_info *cmd_data = NULL;
+
+	if (!data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct uc_temp_buffer_info *)cmd.base;
+	cmd_data->number_of_partitions = data->number_of_partitions;
+	memcpy(cmd_data->buffer_info, data->buffer_info,
+		(sizeof(struct temp_buffer_info)*cmd_data->number_of_partitions));
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_ADD_TEMP_BUFF_INFO,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("uc send bitstream buffers info cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+void ipa3_free_uc_temp_buffs(unsigned int no_of_buffs)
+{
+	unsigned int indx = 0;
+
+	for (indx = 0; indx < no_of_buffs; indx++) {
+		dma_free_attrs(ipa3_ctx->uc_pdev,
+		tb_info.buffer_info[indx].temp_buff_size, cpu_address[indx],
+		tb_info.buffer_info[indx].temp_buff_pa,
+		(DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_FORCE_CONTIGUOUS));
+	}
+}
+
+int ipa3_alloc_temp_buffs_to_uc(unsigned int size, unsigned int no_of_buffs)
+{
+	void *cpu_addr = NULL;
+	unsigned int indx = 0;
+	dma_addr_t phys_base;
+
+	if (size < 1 || no_of_buffs < 1) {
+		IPAERR("Invallid params\n");
+		return -EINVAL;
+	}
+
+	for (indx = 0; indx < no_of_buffs; indx++) {
+		cpu_addr = dma_alloc_attrs(ipa3_ctx->uc_pdev, size, &phys_base,
+		GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_FORCE_CONTIGUOUS);
+		if (!cpu_addr) {
+			IPAERR("No mem for tmp buffs\n");
+			ipa3_free_uc_temp_buffs(indx);
+			return -ENOMEM;
+		}
+
+		cpu_address[indx] = cpu_addr;
+		tb_info.buffer_info[indx].temp_buff_pa = phys_base;
+		tb_info.buffer_info[indx].temp_buff_size = size;
+		tb_info.number_of_partitions += 1;
+	}
+
+	return ipa3_uc_send_temp_buffers_info_cmd(&tb_info);
+}
+
+int ipa3_uc_send_RTPPipeSetup_cmd(struct rtp_pipe_setup_cmd_data *rtp_cmd_data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct rtp_pipe_setup_cmd_data *cmd_data = NULL;
+
+	if (!rtp_cmd_data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct rtp_pipe_setup_cmd_data *)cmd.base;
+	memcpy(cmd_data->uc_prod_tr, rtp_cmd_data->uc_prod_tr,
+		(sizeof(struct temp_buff_info) * MAX_UC_PROD_PIPES));
+	memcpy(cmd_data->uc_prod_er, rtp_cmd_data->uc_prod_er,
+		(sizeof(struct temp_buff_info) * MAX_UC_PROD_PIPES));
+	memcpy(cmd_data->uc_cons_tr, rtp_cmd_data->uc_cons_tr,
+		(sizeof(struct temp_buff_info) * MAX_UC_CONS_PIPES));
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_PIPE_SETUP,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("send RTP pipe setup cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+static int ipa3_uc_setup_prod_pipe_transfer_ring(
+	struct rtp_pipe_setup_cmd_data *rtp_cmd_data, int idx)
+{
+	struct ipa_mem_buffer ring;
+	struct prod_pipe_tre *tr = NULL;
+	int val = 0;
+	u64 next = 0;
+
+	if (!rtp_cmd_data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_TRANSFER_RING_SIZE;
+	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+		&ring.phys_base, GFP_KERNEL);
+	if (ring.base == NULL) {
+		IPAERR("dma alloc coherent failed.\n");
+		return -ENOMEM;
+	}
+
+	tr = (struct prod_pipe_tre *)ring.base;
+	next = tb_info.buffer_info[idx].temp_buff_pa;
+
+	for (val = 0; val < IPA_UC_PROD_TRANSFER_RING_SIZE; val++) {
+		tr->buffer_ptr = next;
+		tr->buf_len = TRE_SIZE;
+		tr->re_type = GSI_TRE_RE_XFER;
+		tr->bei = 0;
+		tr->ieot = 1;
+		next = tr->buffer_ptr + TRE_SIZE;
+		tr++;
+	}
+
+	rtp_cmd_data->uc_prod_tr[idx].temp_buff_pa = ring.phys_base;
+	rtp_cmd_data->uc_prod_tr[idx].temp_buff_size = IPA_UC_PROD_TRANSFER_RING_SIZE;
+	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
+	er_tr_cpu_addresses.no_buffs += 1;
+	return 0;
+}
+
+static int ipa3_uc_setup_prod_pipe_event_ring(
+	struct rtp_pipe_setup_cmd_data *rtp_cmd_data, int index)
+{
+	struct ipa_mem_buffer ring;
+
+	if (!rtp_cmd_data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_EVENT_RING_SIZE;
+	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+		&ring.phys_base, GFP_KERNEL);
+	if (ring.base == NULL) {
+		IPAERR("dma alloc coherent failed.\n");
+		return -EFAULT;
+	}
+
+	rtp_cmd_data->uc_prod_er[index].temp_buff_pa = ring.phys_base;
+	rtp_cmd_data->uc_prod_er[index].temp_buff_size = IPA_UC_PROD_EVENT_RING_SIZE;
+	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
+	er_tr_cpu_addresses.no_buffs += 1;
+	return 0;
+}
+
+static int ipa3_uc_setup_con_pipe_transfer_ring(
+	struct rtp_pipe_setup_cmd_data *rtp_cmd_data, int index)
+{
+	struct ipa_mem_buffer ring;
+
+	if (!rtp_cmd_data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	ring.size = sizeof(struct con_pipe_tre) * IPA_UC_CON_TRANSFER_RING_SIZE;
+	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+		&ring.phys_base, GFP_KERNEL);
+	if (ring.base == NULL) {
+		IPAERR("dma alloc coherent failed.\n");
+		return -ENOMEM;
+	}
+
+	rtp_cmd_data->uc_cons_tr[index].temp_buff_pa = ring.phys_base;
+	rtp_cmd_data->uc_cons_tr[index].temp_buff_size = IPA_UC_CON_TRANSFER_RING_SIZE;
+	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
+	er_tr_cpu_addresses.no_buffs += 1;
+	return 0;
+}
+
+void ipa3_free_uc_pipes_er_tr(void)
+{
+	uint16_t index = 0;
+
+	for (index = 0; index < er_tr_cpu_addresses.no_buffs; index++) {
+		if (index < MAX_UC_PROD_PIPES_TR_INDEX) {
+			dma_free_coherent(ipa3_ctx->pdev,
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_size,
+			er_tr_cpu_addresses.cpu_address[index],
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_pa);
+		} else if (index < MAX_UC_PROD_PIPES_ER_INDEX) {
+			dma_free_coherent(ipa3_ctx->pdev,
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_size,
+			er_tr_cpu_addresses.cpu_address[index],
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_pa);
+		} else if (index < MAX_UC_CONS_PIPES_TR_INDEX) {
+			dma_free_coherent(ipa3_ctx->pdev,
+			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_size,
+			er_tr_cpu_addresses.cpu_address[index],
+			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_pa);
+		}
+	}
+}
+
+int ipa3_allocate_uc_pipes_er_tr_send_to_uc(void)
+{
+	int res = 0;
+	struct rtp_pipe_setup_cmd_data rtp_cmd_data;
+	int indx = 0;
+
+	for (indx = 0; indx < MAX_UC_PROD_PIPES; indx++) {
+		res = ipa3_uc_setup_prod_pipe_transfer_ring(&rtp_cmd_data, indx);
+		if (res) {
+			IPAERR("In RTP Pipe setup prod tr func failed\n");
+			memcpy(&er_tr_cpu_addresses.rtp_tr_er, &rtp_cmd_data,
+			sizeof(rtp_cmd_data));
+			ipa3_free_uc_pipes_er_tr();
+			return res;
+		}
+		res = ipa3_uc_setup_prod_pipe_event_ring(&rtp_cmd_data, indx);
+		if (res) {
+			IPAERR("In RTP Pipe setup pprod er func failed\n");
+			memcpy(&er_tr_cpu_addresses.rtp_tr_er, &rtp_cmd_data,
+			sizeof(rtp_cmd_data));
+			ipa3_free_uc_pipes_er_tr();
+			return res;
+		}
+
+		if (indx < MAX_UC_CONS_PIPES) {
+			res = ipa3_uc_setup_con_pipe_transfer_ring(&rtp_cmd_data, indx);
+			if (res) {
+				memcpy(&er_tr_cpu_addresses.rtp_tr_er, &rtp_cmd_data,
+				sizeof(rtp_cmd_data));
+				ipa3_free_uc_pipes_er_tr();
+				IPAERR("In RTP Pipe setup con tr func failed\n");
+				return res;
+			}
+		}
+	}
+
+	memcpy(&er_tr_cpu_addresses.rtp_tr_er, &rtp_cmd_data, sizeof(rtp_cmd_data));
+	res = ipa3_uc_send_RTPPipeSetup_cmd(&rtp_cmd_data);
+	return res;
+}
+
+int ipa3_insert_dma_info(struct dma_address_map_table *map, uint32_t stream_id)
+{
+	struct list_node *new_node = kzalloc(sizeof(struct list_node), GFP_KERNEL);
+
+	if (!new_node) {
+		IPAERR("failed to alloc memory.\n");
+		return -ENOMEM;
+	}
+
+	if (!map) {
+		IPAERR("Invalid params.\n");
+		kfree(new_node);
+		return -EINVAL;
+	}
+
+	new_node->data = map;
+	list_add(&new_node->list_obj, &mapped_bs_buff_lst[stream_id]);
+
+	return 0;
+}
+
+struct dma_address_map_table *ipa3_search_dma_info(struct dma_buf *dma_buf, uint32_t stream_id)
+{
+	struct list_head *ptr = NULL;
+	struct list_node *entry = NULL;
+
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		IPAERR("Invalid params.\n");
+		return NULL;
+	}
+	list_for_each(ptr, &mapped_bs_buff_lst[stream_id]) {
+		entry = list_entry(ptr, struct list_node, list_obj);
+		if (!entry || !entry->data)
+			continue;
+
+		if (dma_buf == entry->data->dma_buf_list[0])
+			return entry->data;
+	}
+
+	return NULL;
+}
+
+struct dma_address_map_table *ipa3_delete_dma_info(struct dma_buf *dma_buf, int stream_id)
+{
+	struct list_head *ptr = NULL;
+	struct list_node *entry = NULL;
+	struct dma_address_map_table *table_entry = NULL;
+	int found = 0;
+
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		IPAERR("Invalid params.\n");
+		return NULL;
+	}
+	list_for_each(ptr, &mapped_bs_buff_lst[stream_id]) {
+		entry = list_entry(ptr, struct list_node, list_obj);
+		if (!entry || !entry->data)
+			continue;
+		if (dma_buf == entry->data->dma_buf_list[0]) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found && entry) {
+		table_entry = entry->data;
+		list_del(ptr);
+		kfree(entry);
+	}
+
+	return table_entry;
+}
+
+int ipa3_smmu_map_buff(uint64_t bitstream_buffer_fd,
+		uint64_t meta_buff_fd, int stream_id)
+{
+	int err = 0;
+	struct dma_buf *dbuff = NULL;
+	struct dma_buf_attachment *attachment = NULL;
+	struct dma_address_map_table *map_table = NULL;
+
+	map_table = kzalloc(sizeof(struct dma_address_map_table), GFP_KERNEL);
+	if (!map_table) {
+		IPAERR("failed to alloc memory.\n");
+		return -ENOMEM;
+	}
+
+	dbuff = dma_buf_get(bitstream_buffer_fd);
+	if (IS_ERR_OR_NULL(dbuff)) {
+		IPAERR("no dma handle for the fd.\n");
+		err = -EFAULT;
+		goto map_table_free;
+	}
+
+	attachment = dma_buf_attach(dbuff, ipa3_ctx->pdev);
+	if (IS_ERR_OR_NULL(attachment)) {
+		IPAERR("dma buf attachment failed\n");
+		err = -EFAULT;
+		goto dma_buff_put;
+	}
+
+	map_table->dma_buf_list[0] = dbuff;
+	map_table->attachment[0] = attachment;
+	map_table->sgt[0] = NULL;
+
+	if (bitstream_buffer_fd == meta_buff_fd) {
+		map_table->dma_buf_list[1] = NULL;
+		map_table->attachment[1] = NULL;
+		map_table->sgt[1] = NULL;
+		err = ipa3_insert_dma_info(map_table, stream_id);
+		if (err) {
+			IPAERR("dma info insertion failed.\n");
+			goto dma_buff_det;
+		}
+		return err;
+	}
+
+	dbuff = dma_buf_get(meta_buff_fd);
+	if (IS_ERR_OR_NULL(dbuff)) {
+		IPAERR("no dma handle for the fd.\n");
+		err = -EFAULT;
+		goto dma_buff_det;
+	}
+
+	attachment = dma_buf_attach(dbuff, ipa3_ctx->pdev);
+	if (IS_ERR_OR_NULL(attachment)) {
+		IPAERR("dma buf attachment failed.\n");
+		err = -EFAULT;
+		goto dma_buff_det;
+	}
+
+	map_table->dma_buf_list[1] = dbuff;
+	map_table->attachment[1] = attachment;
+	map_table->sgt[1] = NULL;
+	err = ipa3_insert_dma_info(map_table, stream_id);
+	if (err) {
+		IPAERR("dma info insertion failed.\n");
+		goto dma_buff_det;
+	}
+
+	return err;
+
+dma_buff_det:
+	if (map_table->dma_buf_list[0])
+		dma_buf_detach(map_table->dma_buf_list[0], map_table->attachment[0]);
+	if (map_table->dma_buf_list[1])
+		dma_buf_detach(map_table->dma_buf_list[1], map_table->attachment[1]);
+
+dma_buff_put:
+	if (map_table->dma_buf_list[0])
+		dma_buf_put(map_table->dma_buf_list[0]);
+	if (map_table->dma_buf_list[1])
+		dma_buf_put(map_table->dma_buf_list[1]);
+
+map_table_free:
+	kfree(map_table);
+
+	return err;
+}
+
+int ipa3_smmu_unmap_buff(uint64_t bitstream_buffer_fd, uint64_t meta_buff_fd, int stream_id)
+{
+	struct dma_buf *dbuff = NULL;
+	struct dma_address_map_table *map_table = NULL;
+
+	dbuff = dma_buf_get(bitstream_buffer_fd);
+	if (IS_ERR_OR_NULL(dbuff)) {
+		IPAERR("no dma handle for the fd.\n");
+		return -EFAULT;
+	}
+
+	map_table = ipa3_delete_dma_info(dbuff, stream_id);
+	if (!map_table) {
+		dma_buf_put(dbuff);
+		IPAERR("Buffer is not mapped\n");
+		return -EFAULT;
+	}
+
+	if (map_table->sgt[0] !=  NULL) {
+		dma_buf_unmap_attachment(map_table->attachment[0],
+			map_table->sgt[0], DMA_DIR);
+	}
+
+	dma_buf_detach(map_table->dma_buf_list[0], map_table->attachment[0]);
+	dma_buf_put(map_table->dma_buf_list[0]);
+	if (bitstream_buffer_fd != meta_buff_fd) {
+		if (map_table->sgt[1] !=  NULL) {
+			dma_buf_unmap_attachment(map_table->attachment[1],
+				map_table->sgt[1], DMA_DIR);
+		}
+		dma_buf_detach(map_table->dma_buf_list[1], map_table->attachment[1]);
+		dma_buf_put(map_table->dma_buf_list[1]);
+	}
+
+	kfree(map_table);
+	return 0;
+}
+
+int ipa3_map_buff_to_device_addr(struct map_buffer *map_buffs)
+{
+	int index = 0;
+	int err = 0;
+
+	if (!map_buffs) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&mapped_bs_buff_lst[map_buffs->stream_id]);
+	for (index = 0; index < map_buffs->nfd; index++) {
+		err = ipa3_smmu_map_buff(map_buffs->buff_info[index].bitstream_buffer_fd,
+			map_buffs->buff_info[index].meta_buff_fd, map_buffs->stream_id);
+		if (err) {
+			IPAERR("smmu map failed\n");
+			return err;
+		}
+	}
+
+	return err;
+}
+
+int ipa3_unmap_buff_from_device_addr(struct unmap_buffer *unmap_buffs)
+{
+	unsigned char index = 0;
+	int err = 0;
+
+	if (!unmap_buffs) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	for (index = 0; index < unmap_buffs->nfd; index++) {
+		err = ipa3_smmu_unmap_buff(unmap_buffs->buff_info[index].bitstream_buffer_fd,
+			unmap_buffs->buff_info[index].meta_buff_fd, unmap_buffs->stream_id);
+		if (err) {
+			IPAERR("smmu unmap failed\n");
+			return err;
+		}
+	}
+
+	return err;
+}
+
+int ipa3_send_bitstream_buff_info(struct bitstream_buffers *data)
+{
+	struct bitstream_buffers_to_uc tmp;
+	int index = 0;
+	struct dma_buf *dmab = NULL;
+	struct dma_address_map_table *map_table = NULL;
+	struct sg_table *sgt = NULL;
+
+	if (!data || data->buff_cnt < 1) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	IPADBG("Entry\n");
+	memset(&tmp, 0, sizeof(struct bitstream_buffers_to_uc));
+	tmp.buff_cnt = data->buff_cnt;
+	tmp.cookie = data->cookie;
+
+	for (index = 0; index < data->buff_cnt; index++) {
+		tmp.bs_info[index].stream_id = data->bs_info[index].stream_id;
+		tmp.bs_info[index].fence_id = data->bs_info[index].fence_id;
+		tmp.bs_info[index].buff_fd = data->bs_info[index].buff_fd;
+		tmp.bs_info[index].buff_size = data->bs_info[index].buff_size;
+		tmp.bs_info[index].meta_buff_fd = data->bs_info[index].meta_buff_fd;
+		tmp.bs_info[index].meta_buff_size = data->bs_info[index].meta_buff_size;
+		dmab = dma_buf_get(tmp.bs_info[index].buff_fd);
+		if (IS_ERR_OR_NULL(dmab)) {
+			IPAERR("no dma handle for the fd.\n");
+			return -EFAULT;
+		}
+
+		map_table = ipa3_search_dma_info(dmab, tmp.bs_info[index].stream_id);
+		if (!map_table) {
+			IPAERR("no map table from search dma info.\n");
+			dma_buf_put(dmab);
+			return -EFAULT;
+		}
+
+		if (!map_table->sgt[0]) {
+			sgt = dma_buf_map_attachment(map_table->attachment[0], DMA_DIR);
+			if (IS_ERR_OR_NULL(sgt)) {
+				dma_buf_put(dmab);
+				IPAERR("dma buf map attachment failed\n");
+				return -EFAULT;
+			}
+			map_table->sgt[0] = sgt;
+		}
+
+		if (data->bs_info[index].meta_buff_fd != data->bs_info[index].buff_fd) {
+			if (!map_table->sgt[1]) {
+				sgt = dma_buf_map_attachment(map_table->attachment[1], DMA_DIR);
+				if (IS_ERR_OR_NULL(sgt)) {
+					dma_buf_detach(map_table->dma_buf_list[0],
+						map_table->attachment[0]);
+					dma_buf_put(dmab);
+					IPAERR("dma buf map attachment failed\n");
+					return -EFAULT;
+				}
+				map_table->sgt[1] = sgt;
+			}
+
+			tmp.bs_info[index].buff_addr = map_table->sgt[0]->sgl->dma_address;
+			tmp.bs_info[index].meta_buff_addr = map_table->sgt[1]->sgl->dma_address;
+		} else {
+			tmp.bs_info[index].buff_addr = map_table->sgt[0]->sgl->dma_address +
+			data->bs_info[index].buff_offset;
+			/* same fd: sgt[1] was never mapped, reuse the single mapping */
+			tmp.bs_info[index].meta_buff_addr = map_table->sgt[0]->sgl->dma_address;
+		}
+	}
+
+	return ipa3_uc_send_add_bitstream_buffers_cmd(&tmp);
+}
+
+int ipa3_uc_send_hfi_cmd(struct hfi_queue_info *data)
+{
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct hfi_queue_info *cmd_data;
+
+	if (!data) {
+		IPAERR("Invalid params.\n");
+		return -EINVAL;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("failed to alloc DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct hfi_queue_info *)cmd.base;
+	memcpy(cmd_data, data, sizeof(struct hfi_queue_info));
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_RTP_GET_HFI_STRUCT,
+				0,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("uc send hfi queue info cmd failed\n");
+		result = -EPERM;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+int ipa3_create_hfi_send_uc(void)
+{
+	int res = 0;
+	struct synx_session *synx_session_ptr = NULL;
+	struct synx_initialization_params params;
+	struct synx_queue_desc queue_desc;
+	char synx_session_name[MAX_SYNX_FENCE_SESSION_NAME];
+	struct hfi_queue_info data;
+	dma_addr_t hfi_queue_addr = 0;
+
+	snprintf(synx_session_name, MAX_SYNX_FENCE_SESSION_NAME, "ipa synx fence");
+	queue_desc.vaddr = NULL;
+	queue_desc.mem_data = NULL;
+	queue_desc.size = 0;
+	queue_desc.dev_addr = 0;
+
+	params.name = (const char *)synx_session_name;
+	params.ptr = &queue_desc;
+	params.id = SYNX_CLIENT_HW_FENCE_VID_CTX0;
+	params.flags = SYNX_INIT_MAX;
+
+	synx_session_ptr = synx_initialize(&params);
+	if (IS_ERR_OR_NULL(synx_session_ptr)) {
+		IPAERR("invalid synx fence session\n");
+		return -EFAULT;
+	}
+
+	res = iommu_map(iommu_get_domain_for_dev(ipa3_ctx->pdev),
+			queue_desc.dev_addr, queue_desc.dev_addr,
+			queue_desc.size, IOMMU_READ | IOMMU_WRITE);
+	if (res) {
+		IPAERR("HFI - smmu map failed\n");
+		return -EFAULT;
+	}
+
+	hfi_queue_addr = queue_desc.dev_addr;
+	data.hfi_queue_addr = hfi_queue_addr;
+	data.hfi_queue_size = queue_desc.size;
+	data.queue_header_start_addr = hfi_queue_addr +
+			sizeof(struct msm_hw_fence_hfi_queue_table_header);
+	data.queue_payload_start_addr = data.queue_header_start_addr +
+			sizeof(struct msm_hw_fence_hfi_queue_header);
+	res = ipa3_uc_send_hfi_cmd(&data);
+	return res;
+}
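Every uC command helper in this file follows one transport pattern: allocate a DMA-coherent staging buffer on uc_pdev, copy the payload in, pass the physical address to ipa3_uc_send_cmd() with a 10 s timeout, and free the buffer whether or not the command succeeded. The skeleton, factored out for clarity (this helper is illustrative; the file inlines the pattern in each sender):

/* illustrative skeleton of the per-command pattern used throughout this file */
static int ipa3_uc_send_rtp_cmd(const void *payload, size_t size, u32 opcode)
{
	struct ipa_mem_buffer cmd;
	int ret;

	cmd.size = size;
	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
				      &cmd.phys_base, GFP_KERNEL);
	if (!cmd.base)
		return -ENOMEM;

	memcpy(cmd.base, payload, size);
	ret = ipa3_uc_send_cmd((u32)cmd.phys_base, opcode, 0, false, 10 * HZ);
	if (ret)
		ret = -EPERM;

	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
	return ret;
}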

+ 109 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -331,6 +331,7 @@ enum ipa_ver {
 	IPA_5_1_APQ,
 	IPA_5_2,
 	IPA_5_5,
+	IPA_5_5_XR,
 	IPA_VER_MAX,
 };
 
@@ -633,6 +634,20 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config
 		[IPA_v5_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
 		{22, 22}, {16, 16}, {0, 0}, {0, 0}, {16, 16}, {0, 0}, {0, 0},  },
 	},
+
+	[IPA_5_5_XR] = {
+		/* UL  DL  DMA  QDSS  URLLC UC_RX_Q N/A */
+		[IPA_v5_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 9}, {4, 10}, {1, 1}, {1, 1}, {1, 63}, {0, 63}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
+		{9, 9}, {12, 12}, {2, 2}, {2, 2}, {10, 10}, {0, 0}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{9, 9}, {24, 24}, {4, 4}, {4, 4}, {20, 20}, {0, 0}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 63}, {0, 63}, {0, 63}, {0, 63}, {1, 63}, {0, 63}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{22, 22}, {16, 16}, {6, 6}, {2, 2}, {16, 16}, {0, 0}, {0, 0},  },
+	},
 };
 
 static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
@@ -806,6 +821,16 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
 		[IPA_v5_0_RSRC_GRP_TYPE_DST_ULSO_SEGMENTS] = {
 		{0, 0x3f}, {0, 0x3f}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},  },
 	},
+
+	[IPA_5_5_XR] = {
+		/* UL  DL  DMA  QDSS unused  UC_RX_Q DRBIP N/A */
+		[IPA_v5_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{6, 6}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{0, 3}, {0, 3}, {1, 2}, {1, 1}, {0, 0}, {0, 0}, {0, 0},  },
+		[IPA_v5_0_RSRC_GRP_TYPE_DST_ULSO_SEGMENTS] = {
+		{0, 0x3f}, {0, 0x3f}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},  },
+	},
 };
 
 static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
@@ -923,6 +948,12 @@ static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{3, 3}, {3, 3}, {0, 0}, {0, 0}, {3, 3}, {0, 0}  },
 	},
+
+	[IPA_5_5_XR] = {
+		/* UL  DL  DMA  QDSS  URLLC UC_RX_Q */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {3, 3}, {3, 3}, {3, 3}, {3, 3}, {0, 0}  },
+	},
 };
 
 static const u32 ipa3_rsrc_rx_grp_hps_weight_config
@@ -1040,6 +1071,8 @@ static const struct ipa_qmb_outstanding ipa3_qmb_outstanding
 	[IPA_5_2][IPA_QMB_INSTANCE_DDR]		= {13, 13, 0},
 	[IPA_5_5][IPA_QMB_INSTANCE_DDR]		= {16, 12, 0},
 	[IPA_5_5][IPA_QMB_INSTANCE_PCIE]	= {16, 8, 0},
+	[IPA_5_5_XR][IPA_QMB_INSTANCE_DDR]	= {16, 12, 0},
+	[IPA_5_5_XR][IPA_QMB_INSTANCE_PCIE]	= {16, 8, 0},
 };
 
 enum ipa_tx_instance {
@@ -5737,6 +5770,78 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 1, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3 },
 			IPA_TX_INSTANCE_DL },
+
+	/* IPA_5_5_XR */
+
+	[IPA_5_5_XR][IPA_CLIENT_APPS_LAN_PROD] = {
+			true, IPA_v5_5_GROUP_UL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 19, 26, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4},
+			IPA_TX_INSTANCE_NA },
+	[IPA_5_5_XR][IPA_CLIENT_APPS_CMD_PROD] = {
+			true, IPA_v5_5_GROUP_UL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 12, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0},
+			IPA_TX_INSTANCE_NA },
+	[IPA_5_5_XR][IPA_CLIENT_WLAN2_PROD] = {
+			true, IPA_v5_5_GROUP_UL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 16, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2},
+			IPA_TX_INSTANCE_NA },
+
+	[IPA_5_5_XR][IPA_CLIENT_APPS_LAN_CONS] = {
+			true, IPA_v5_5_GROUP_UL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 14, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0},
+			IPA_TX_INSTANCE_UL },
+
+	[IPA_5_5_XR][IPA_CLIENT_WLAN2_CONS] = {
+			true, IPA_v5_5_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 3},
+			IPA_TX_INSTANCE_DL },
+
+	[IPA_5_5_XR][IPA_CLIENT_UC_RTP1_CONS] = {
+			true, IPA_v5_5_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 31, 2, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3},
+			IPA_TX_INSTANCE_DL},
+
+	[IPA_5_5_XR][IPA_CLIENT_UC_RTP2_CONS] = {
+			true, IPA_v5_5_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 32, 3, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3},
+			IPA_TX_INSTANCE_DL},
+
+	[IPA_5_5_XR][IPA_CLIENT_UC_RTP3_CONS] = {
+			true, IPA_v5_5_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 33, 4, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3},
+			IPA_TX_INSTANCE_DL},
+
+	[IPA_5_5_XR][IPA_CLIENT_UC_RTP4_CONS] = {
+			true, IPA_v5_5_GROUP_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 34, 5, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 3},
+			IPA_TX_INSTANCE_DL},
 };
 
 static struct ipa3_mem_partition ipa_3_0_mem_part = {
@@ -7532,6 +7637,8 @@ u8 ipa3_get_hw_type_index(void)
 		break;
 	case IPA_HW_v5_5:
 		hw_type_index = IPA_5_5;
+		if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_XR)
+			hw_type_index = IPA_5_5_XR;
 		break;
 	default:
 		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
@@ -11810,6 +11917,7 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index,
 		break;
 
 	case IPA_5_5:
+	case IPA_5_5_XR:
 		if (src) {
 			switch (group_index) {
 			case IPA_v5_5_GROUP_UL:
@@ -12063,6 +12171,7 @@ void ipa3_set_resorce_groups_min_max_limits(void)
 		dst_grp_idx_max = IPA_v5_2_DST_GROUP_MAX;
 		break;
 	case IPA_5_5:
+	case IPA_5_5_XR:
 		src_rsrc_type_max = IPA_v5_0_RSRC_GRP_TYPE_SRC_MAX;
 		dst_rsrc_type_max = IPA_v5_0_RSRC_GRP_TYPE_DST_MAX;
 		src_grp_idx_max = IPA_v5_5_SRC_GROUP_MAX;