View source code

Merge f3f5767b03c52ba064a1f913b0d082c276ea381c on remote branch

Change-Id: I6990293fdb7f5894eb84e0e4c48f6e3ab6b15c69
Linux Build Service Account — 8 months ago
Parent commit: c40698e611

+ 1 - 1
drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c

@@ -1612,7 +1612,7 @@ static void ipa_xr_wdi_opt_dpath_rsrv_filter_wq_handler(struct work_struct *work
 	int res = 0;
 
 	res = ipa_xr_wdi_opt_dpath_rsrv_filter_req();
-	if (!res)
+	if (res)
 		IPAERR("Failed to reserve the filters in wlan\n");
 }
 

+ 188 - 42
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -317,6 +317,7 @@ static const struct of_device_id ipa_plat_drv_match[] = {
 	{ .compatible = "qcom,ipa", },
 	{ .compatible = "qcom,ipa-smmu-ap-cb", },
 	{ .compatible = "qcom,ipa-smmu-wlan-cb", },
+	{ .compatible = "qcom,ipa-smmu-rtp-cb", },
 	{ .compatible = "qcom,ipa-smmu-uc-cb", },
 	{ .compatible = "qcom,ipa-smmu-11ad-cb", },
 	{ .compatible = "qcom,ipa-smmu-eth-cb", },
@@ -956,6 +957,11 @@ struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
 	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN);
 }
 
+struct iommu_domain *ipa3_get_rtp_smmu_domain(void)
+{
+	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_RTP);
+}
+
 struct iommu_domain *ipa3_get_wlan1_smmu_domain(void)
 {
 	return ipa3_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN1);
@@ -9874,6 +9880,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->free_page_task_scheduled = false;
 
 	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
+	mutex_init(&ipa3_ctx->ssr_lock);
 	ipa3_ctx->is_modem_up = false;
 	ipa3_ctx->mhi_ctrl_state = IPA_MHI_CTRL_NOT_SETUP;
 
@@ -11011,6 +11018,12 @@ static int ipa_smmu_perph_cb_probe(struct device *dev,
 	u32 add_map_size;
 	const u32 *add_map;
 	int i;
+	u32 iova;
+	u32 pa;
+	u32 size;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
 	u32 iova_ap_mapping[2];
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
 	int mapping_config;
@@ -11099,12 +11112,9 @@ static int ipa_smmu_perph_cb_probe(struct device *dev,
 
 		/* iterate of each entry of the additional mapping array */
 		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
-			u32 iova = be32_to_cpu(add_map[i]);
-			u32 pa = be32_to_cpu(add_map[i + 1]);
-			u32 size = be32_to_cpu(add_map[i + 2]);
-			unsigned long iova_p;
-			phys_addr_t pa_p;
-			u32 size_p;
+			iova = be32_to_cpu(add_map[i]);
+			pa = be32_to_cpu(add_map[i + 1]);
+			size = be32_to_cpu(add_map[i + 2]);
 
 			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
 				iova_p, pa_p, size_p);
@@ -11287,11 +11297,12 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 	u32 ipa_smem_size = 0;
 	int ret;
 	int i;
+	u32 iova;
+	u32 pa;
+	u32 size;
 	unsigned long iova_p;
 	phys_addr_t pa_p;
 	u32 size_p;
-	phys_addr_t iova;
-	phys_addr_t pa;
 	u32 iova_ap_mapping[2];
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
 	int mapping_config;
@@ -11402,12 +11413,9 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 
 		/* iterate of each entry of the additional mapping array */
 		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
-			u32 iova = be32_to_cpu(add_map[i]);
-			u32 pa = be32_to_cpu(add_map[i + 1]);
-			u32 size = be32_to_cpu(add_map[i + 2]);
-			unsigned long iova_p;
-			phys_addr_t pa_p;
-			u32 size_p;
+			iova = be32_to_cpu(add_map[i]);
+			pa = be32_to_cpu(add_map[i + 1]);
+			size = be32_to_cpu(add_map[i + 2]);
 
 			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
 				iova_p, pa_p, size_p);
@@ -11490,6 +11498,154 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 	return 0;
 }
 
+
+static int ipa_smmu_rtp_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_RTP);
+	int fast = 0;
+	int bypass = 0;
+	u32 add_map_size;
+	const u32 *add_map;
+	int i;
+	u32 iova;
+	u32 pa;
+	u32 size;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	u32 iova_ap_mapping[2];
+#if (KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE)
+	int mapping_config;
+#endif
+	u32 geometry_ap_mapping[2];
+
+	IPADBG("RTP CB PROBE dev=%pK\n", dev);
+
+	if (!smmu_info.present[IPA_SMMU_CB_RTP]) {
+		IPAERR("RTP SMMU is disabled\n");
+		return 0;
+	}
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+			dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	IPADBG("RTP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev);
+
+	cb->iommu_domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(cb->iommu_domain)) {
+		IPAERR("could not get iommu domain\n");
+		return -EINVAL;
+	}
+
+	IPADBG("RTP CB PROBE mapping retrieved\n");
+
+	cb->is_cache_coherent = of_property_read_bool(dev->of_node,
+						"dma-coherent");
+	cb->dev   = dev;
+	cb->valid = true;
+
+	cb->va_start = cb->va_end  = cb->va_size = 0;
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-dma-addr-pool",
+			iova_ap_mapping, 2) == 0) {
+		cb->va_start = iova_ap_mapping[0];
+		cb->va_size  = iova_ap_mapping[1];
+		cb->va_end   = cb->va_start + cb->va_size;
+	}
+
+	IPADBG("RTP CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n",
+		   dev, cb->va_start, cb->va_size);
+	if (of_property_read_u32_array(
+			dev->of_node, "qcom,iommu-geometry",
+			geometry_ap_mapping, 2) == 0) {
+		cb->geometry_start = geometry_ap_mapping[0];
+		cb->geometry_end  = geometry_ap_mapping[1];
+	} else {
+		IPADBG("RTP CB PROBE Geometry not defined using max!\n");
+		cb->geometry_start = 0;
+		cb->geometry_end = 0xF0000000;
+	}
+
+	IPADBG("RTP CB PROBE dev=%pK geometry_start=0x%x geometry_end=0x%x\n",
+		   dev, cb->geometry_start, cb->geometry_end);
+
+	/*
+	 * Prior to these calls to iommu_domain_get_attr(), these
+	 * attributes were set in this function relative to dtsi values
+	 * defined for this driver.  In other words, if corresponding ipa
+	 * driver owned values were found in the dtsi, they were read and
+	 * set here.
+	 *
+	 * In this new world, the developer will use iommu owned dtsi
+	 * settings to set them there.  This new logic below, simply
+	 * checks to see if they've been set in dtsi.  If so, the logic
+	 * further below acts accordingly...
+	 */
+#if (KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE)
+
+	mapping_config = qcom_iommu_get_mappings_configuration(cb->iommu_domain);
+
+	if (mapping_config < 0) {
+		IPAERR("No Mapping configuration found for RTP CB\n");
+	} else {
+		bypass = (mapping_config & QCOM_IOMMU_MAPPING_CONF_S1_BYPASS) ? 1 : 0;
+		fast = (mapping_config & QCOM_IOMMU_MAPPING_CONF_FAST) ? 1 : 0;
+	}
+#else
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+	iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast);
+#endif
+	IPADBG("RTP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n",
+		   dev, bypass, fast);
+
+	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_RTP] = (bypass != 0);
+
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* mapping size is an array of 3-tuple of u32 */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate of each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			iova = be32_to_cpu(add_map[i]);
+			pa = be32_to_cpu(add_map[i + 1]);
+			size = be32_to_cpu(add_map[i + 2]);
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG_LOW("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu_domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+		}
+	}
+
+	smmu_info.present[IPA_SMMU_CB_RTP] = true;
+
+	cb->done = true;
+	ipa3_ctx->rtp_pdev = dev;
+	cb->next_addr = cb->va_end;
+
+	return 0;
+}
+
 static int ipa_smmu_11ad_cb_probe(struct device *dev)
 {
 	int bypass = 0;
@@ -11572,6 +11728,9 @@ static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
 	case IPA_SMMU_CB_UC:
 		ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
 		return ipa_smmu_uc_cb_probe(dev);
+	case IPA_SMMU_CB_RTP:
+		ipa3_ctx->rtp_pdev = &ipa3_ctx->master_pdev->dev;
+		return ipa_smmu_rtp_cb_probe(dev);
 	case IPA_SMMU_CB_11AD:
 		return ipa_smmu_11ad_cb_probe(dev);
 	case IPA_SMMU_CB_MAX:
@@ -11753,10 +11912,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_AP] = true;
@@ -11765,10 +11920,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_WLAN] = true;
@@ -11777,10 +11928,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan1-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN1);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_WLAN1] = true;
@@ -11789,10 +11936,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-eth-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_ETH);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_ETH] = true;
@@ -11801,10 +11944,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-eth1-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_ETH1);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_ETH1] = true;
@@ -11813,10 +11952,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb =  ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_UC] = true;
@@ -11825,10 +11960,6 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 	}
 
 	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-11ad-cb")) {
-		if (ipa3_ctx == NULL) {
-			IPAERR("ipa3_ctx was not initialized\n");
-			return -EPROBE_DEFER;
-		}
 		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_11AD);
 		cb->dev = dev;
 		smmu_info.present[IPA_SMMU_CB_11AD] = true;
@@ -11836,6 +11967,14 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 		return ipa_smmu_update_fw_loader();
 	}
 
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-rtp-cb")) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_RTP);
+		cb->dev = dev;
+		smmu_info.present[IPA_SMMU_CB_RTP] = true;
+		ipa3_ctx->num_smmu_cb_probed++;
+		return ipa_smmu_update_fw_loader();
+	}
+
 	if (of_device_is_compatible(dev->of_node,
 	    "qcom,smp2p-map-ipa-1-out"))
 		return ipa3_smp2p_probe(dev);
@@ -12166,6 +12305,13 @@ int ipa3_iommu_map(struct iommu_domain *domain,
 			ipa_assert();
 			return -EFAULT;
 		}
+	} else if (domain == ipa3_get_rtp_smmu_domain()) {
+		cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_RTP);
+		if (iova >= cb->va_start && iova < cb->va_end) {
+			IPAERR("iommu rtp overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
 	} else {
 		IPAERR("Unexpected domain 0x%pK\n", domain);
 		ipa_assert();

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -2013,6 +2013,7 @@ enum ipa_smmu_cb_type {
 	IPA_SMMU_CB_11AD,
 	IPA_SMMU_CB_ETH,
 	IPA_SMMU_CB_ETH1,
+	IPA_SMMU_CB_RTP,
 	IPA_SMMU_CB_MAX
 };
 
@@ -2460,6 +2461,7 @@ struct ipa3_context {
 	struct platform_device *master_pdev;
 	struct device *pdev;
 	struct device *uc_pdev;
+	struct device *rtp_pdev;
 	spinlock_t idr_lock;
 	u32 enable_clock_scaling;
 	u32 enable_napi_chain;
@@ -2635,6 +2637,7 @@ struct ipa3_context {
 	struct ipa3_page_recycle_stats prev_default_recycle_stats;
 	struct ipa3_page_recycle_stats prev_low_lat_data_recycle_stats;
 	struct mutex recycle_stats_collection_lock;
+	struct mutex ssr_lock;
 };
 
 struct ipa3_plat_drv_res {

+ 29 - 10
drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c

@@ -197,6 +197,7 @@ int ipa3_uc_send_tuple_info_cmd(struct traffic_tuple_info *data)
 		cmd_data->ip_info.ipv4.protocol = data->ip_info.ipv4.protocol;
 	}
 
+	IPADBG("Sending uc CMD RTP_TUPLE_INFO\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_TUPLE_INFO,
 				0,
@@ -272,7 +273,7 @@ int ipa3_tuple_info_cmd_to_wlan_uc(struct traffic_tuple_info *req, u32 stream_id
 	if (result)
 		IPAERR("Fail to send tuple info cmd to uc\n");
 	else
-		IPADBG("send tuple info cmd to uc succeeded\n\n");
+		IPADBG("send tuple info cmd to uc succeeded\n");
 
 	return result;
 }
@@ -302,6 +303,7 @@ int ipa3_uc_send_remove_stream_cmd(struct remove_bitstream_buffers *data)
 
 	cmd_data = (struct remove_bitstream_buffers *)cmd.base;
 	cmd_data->stream_id = data->stream_id;
+	IPADBG("Sending uc CMD RTP_REMOVE_STREAM\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_REMOVE_STREAM,
 				0,
@@ -339,6 +341,7 @@ int ipa3_uc_send_add_bitstream_buffers_cmd(struct bitstream_buffers_to_uc *data)
 	cmd_data->cookie = data->cookie;
 	memcpy(cmd_data->bs_info, data->bs_info, (cmd_data->buff_cnt *
 		sizeof(struct bitstream_buffer_info_to_uc)));
+	IPADBG("Sending uc CMD RTP_ADD_BIT_STREAM_BUFF\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_ADD_BIT_STREAM_BUFF,
 				0,
@@ -349,7 +352,6 @@ int ipa3_uc_send_add_bitstream_buffers_cmd(struct bitstream_buffers_to_uc *data)
 	}
 
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
-
 	return result;
 }
 
@@ -376,12 +378,13 @@ int ipa3_uc_send_temp_buffers_info_cmd(struct uc_temp_buffer_info *data)
 	cmd_data->number_of_partitions = data->number_of_partitions;
 	memcpy(cmd_data->buffer_info, data->buffer_info,
 		(sizeof(struct temp_buffer_info)*cmd_data->number_of_partitions));
+	IPADBG("Sending uc CMD RTP_ADD_TEMP_BUFF_INFO\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_ADD_TEMP_BUFF_INFO,
 				0,
 				false, 10*HZ);
 	if (result) {
-		IPAERR("uc send bitstream buffers info cmd failed\n");
+		IPAERR("uc send temp buffers info cmd failed\n");
 		result = -EPERM;
 	}
 
@@ -399,6 +402,8 @@ void ipa3_free_uc_temp_buffs(unsigned int no_of_buffs)
 		tb_info.buffer_info[indx].temp_buff_pa,
 		(DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_FORCE_CONTIGUOUS));
 	}
+
+	IPADBG("freed uc temp buffs\n");
 }
 
 int ipa3_alloc_temp_buffs_to_uc(unsigned int size, unsigned int no_of_buffs)
@@ -427,6 +432,7 @@ int ipa3_alloc_temp_buffs_to_uc(unsigned int size, unsigned int no_of_buffs)
 		tb_info.number_of_partitions += 1;
 	}
 
+	IPADBG("allocated mem for temp buffs\n");
 	return ipa3_uc_send_temp_buffers_info_cmd(&tb_info);
 }
 
@@ -456,6 +462,7 @@ int ipa3_uc_send_RTPPipeSetup_cmd(struct rtp_pipe_setup_cmd_data *rtp_cmd_data)
 		(sizeof(struct temp_buff_info) * MAX_UC_PROD_PIPES));
 	memcpy(cmd_data->uc_cons_tr, rtp_cmd_data->uc_cons_tr,
 		(sizeof(struct temp_buff_info) * MAX_UC_CONS_PIPES));
+	IPADBG("Sending uc CMD RTP_PIPE_SETUP\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_PIPE_SETUP,
 				0,
@@ -504,9 +511,10 @@ static int ipa3_uc_setup_prod_pipe_transfer_ring(
 	}
 
 	rtp_cmd_data->uc_prod_tr[idx].temp_buff_pa = ring.phys_base;
-	rtp_cmd_data->uc_prod_tr[idx].temp_buff_size = IPA_UC_PROD_TRANSFER_RING_SIZE;
+	rtp_cmd_data->uc_prod_tr[idx].temp_buff_size = ring.size;
 	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
 	er_tr_cpu_addresses.no_buffs += 1;
+	IPADBG("prod pipe transfer ring setup done\n");
 	return 0;
 }
 
@@ -529,9 +537,10 @@ static int ipa3_uc_setup_prod_pipe_event_ring(
 	}
 
 	rtp_cmd_data->uc_prod_er[index].temp_buff_pa = ring.phys_base;
-	rtp_cmd_data->uc_prod_er[index].temp_buff_size = IPA_UC_PROD_EVENT_RING_SIZE;
+	rtp_cmd_data->uc_prod_er[index].temp_buff_size = ring.size;
 	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
 	er_tr_cpu_addresses.no_buffs += 1;
+	IPADBG("prod pipe event ring setup done\n");
 	return 0;
 }
 
@@ -554,9 +563,10 @@ static int ipa3_uc_setup_con_pipe_transfer_ring(
 	}
 
 	rtp_cmd_data->uc_cons_tr[index].temp_buff_pa = ring.phys_base;
-	rtp_cmd_data->uc_cons_tr[index].temp_buff_size = IPA_UC_CON_TRANSFER_RING_SIZE;
+	rtp_cmd_data->uc_cons_tr[index].temp_buff_size = ring.size;
 	er_tr_cpu_addresses.cpu_address[er_tr_cpu_addresses.no_buffs] = ring.base;
 	er_tr_cpu_addresses.no_buffs += 1;
+	IPADBG("con pipe transfer ring setup done\n");
 	return 0;
 }
 
@@ -594,6 +604,8 @@ void ipa3_free_uc_pipes_er_tr(void)
 					-MAX_UC_PROD_PIPES_ER_INDEX].temp_buff_pa);
 		}
 	}
+
+	IPADBG("freed uc pipes er and tr memory\n");
 }
 
 int ipa3_allocate_uc_pipes_er_tr_send_to_uc(void)
@@ -634,6 +646,7 @@ int ipa3_allocate_uc_pipes_er_tr_send_to_uc(void)
 
 	memcpy(&er_tr_cpu_addresses.rtp_tr_er, &rtp_cmd_data, sizeof(rtp_cmd_data));
 	res = ipa3_uc_send_RTPPipeSetup_cmd(&rtp_cmd_data);
+	IPADBG("allocated uc pipes er, tr memory and send to uc\n");
 	return res;
 }
 
@@ -654,7 +667,7 @@ int ipa3_insert_dma_info(struct dma_address_map_table *map, uint32_t stream_id)
 
 	new_node->data = map;
 	list_add(&new_node->list_obj, &mapped_bs_buff_lst[stream_id]);
-
+	IPADBG("inserted dma buff info into list\n");
 	return 0;
 }
 
@@ -676,6 +689,7 @@ struct dma_address_map_table *ipa3_search_dma_info(struct dma_buf *dma_buf, uint
 			return entry->data;
 	}
 
+	IPADBG("Not found dma buff info in list\n");
 	return NULL;
 }
 
@@ -706,6 +720,7 @@ struct dma_address_map_table *ipa3_delete_dma_info(struct dma_buf *dma_buf, int
 		kfree(entry);
 	}
 
+	IPADBG("deleted dma buff info from list\n");
 	return table_entry;
 }
 
@@ -730,7 +745,7 @@ int ipa3_smmu_map_buff(uint64_t bitstream_buffer_fd,
 		goto map_table_free;
 	}
 
-	attachment = dma_buf_attach(dbuff, ipa3_ctx->pdev);
+	attachment = dma_buf_attach(dbuff, ipa3_ctx->rtp_pdev);
 	if (IS_ERR_OR_NULL(attachment)) {
 		IPAERR("dma buf attachment failed\n");
 		err = -EFAULT;
@@ -760,7 +775,7 @@ int ipa3_smmu_map_buff(uint64_t bitstream_buffer_fd,
 		goto dma_buff_det;
 	}
 
-	attachment = dma_buf_attach(dbuff, ipa3_ctx->pdev);
+	attachment = dma_buf_attach(dbuff, ipa3_ctx->rtp_pdev);
 	if (IS_ERR_OR_NULL(attachment)) {
 		IPAERR("dma buf attachment failed.\n");
 		err = -EFAULT;
@@ -776,6 +791,7 @@ int ipa3_smmu_map_buff(uint64_t bitstream_buffer_fd,
 		goto dma_buff_det;
 	}
 
+	IPADBG("smmu map buff addr done\n");
 	return err;
 
 dma_buff_det:
@@ -830,6 +846,7 @@ int ipa3_smmu_unmap_buff(uint64_t bitstream_buffer_fd, uint64_t meta_buff_fd, in
 		dma_buf_put(map_table->dma_buf_list[1]);
 	}
 
+	IPADBG("smmu unmap done\n");
 	kfree(map_table);
 	return 0;
 }
@@ -854,6 +871,7 @@ int ipa3_map_buff_to_device_addr(struct map_buffer *map_buffs)
 		}
 	}
 
+	IPADBG("maped buff addr to device addr\n");
 	return err;
 }
 
@@ -876,6 +894,7 @@ int ipa3_unmap_buff_from_device_addr(struct unmap_buffer *unmap_buffs)
 		}
 	}
 
+	IPADBG("unmaped buff addr from device addr\n");
 	return err;
 }
 
@@ -892,7 +911,6 @@ int ipa3_send_bitstream_buff_info(struct bitstream_buffers *data)
 		return -EINVAL;
 	}
 
-	IPADBG("Entry\n");
 	memset(&tmp, 0, sizeof(struct bitstream_buffers_to_uc));
 	tmp.buff_cnt = data->buff_cnt;
 	tmp.cookie = data->cookie;
@@ -973,6 +991,7 @@ int ipa3_uc_send_hfi_cmd(struct hfi_queue_info *data)
 
 	cmd_data = (struct hfi_queue_info *)cmd.base;
 	memcpy(cmd_data, data, sizeof(struct hfi_queue_info));
+	IPADBG("Sending uc CMD RTP_GET_HFI_STRUCT\n");
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_RTP_GET_HFI_STRUCT,
 				0,

+ 4 - 4
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -13932,17 +13932,17 @@ bool ipa3_is_modem_up(void)
 {
 	bool is_up;
 
-	mutex_lock(&ipa3_ctx->lock);
+	mutex_lock(&ipa3_ctx->ssr_lock);
 	is_up = ipa3_ctx->is_modem_up;
-	mutex_unlock(&ipa3_ctx->lock);
+	mutex_unlock(&ipa3_ctx->ssr_lock);
 	return is_up;
 }
 
 void ipa3_set_modem_up(bool is_up)
 {
-	mutex_lock(&ipa3_ctx->lock);
+	mutex_lock(&ipa3_ctx->ssr_lock);
 	ipa3_ctx->is_modem_up = is_up;
-	mutex_unlock(&ipa3_ctx->lock);
+	mutex_unlock(&ipa3_ctx->ssr_lock);
 }
 
 /**

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c

@@ -2309,7 +2309,7 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 			(struct ipa_hw_hdr_proc_ctx_rtp_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 0;
+		ctx->hdr_add.tlv.length = 2;
 		ctx->hdr_add.tlv.value = 0;
 		hdr_addr = hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx length %d\n",