소스 검색

msm: camera: icp: Add support for new mem region cmd

To avoid using GP registers to send different memory region
info, use GP registers to configure only the consolidated region.
The specifics for different regions within the consolidated region
are later sent to FW as a new HFI cmd.

CRs-Fixed: 3469619
Change-Id: I2eb9511a4df5c8eb4ca09b60acd1fcffb3ac4dff
Signed-off-by: Karthik Anantha Ram <[email protected]>
Karthik Anantha Ram 2 년 전
부모
커밋
2acac43289

+ 2 - 3
drivers/cam_icp/fw_inc/hfi_intf.h

@@ -44,7 +44,7 @@ struct hfi_mem {
  * @io_mem2: 2nd io memory info
  * @fw_uncached: FW uncached region info
  * @global_sync: Global sync mem for IPC
- * @hwmutex: HWMutex memory
+ * @device_mem: device memory
  */
 struct hfi_mem_info {
 	struct hfi_mem qtbl;
@@ -58,8 +58,7 @@ struct hfi_mem_info {
 	struct hfi_mem io_mem;
 	struct hfi_mem io_mem2;
 	struct hfi_mem fw_uncached;
-	struct hfi_mem global_sync;
-	struct hfi_mem hwmutex;
+	struct hfi_mem device_mem;
 };
 
 /**

+ 2 - 4
drivers/cam_icp/fw_inc/hfi_reg.h

@@ -30,10 +30,8 @@
 #define HFI_REG_IO2_REGION_SIZE              GEN_PURPOSE_REG(16)
 #define HFI_REG_FWUNCACHED_REGION_IOVA       GEN_PURPOSE_REG(17)
 #define HFI_REG_FWUNCACHED_REGION_SIZE       GEN_PURPOSE_REG(18)
-#define HFI_REG_FWUNCACHED_GLOBAL_SYNC_IOVA  GEN_PURPOSE_REG(19)
-#define HFI_REG_FWUNCACHED_GLOBAL_SYNC_LEN   GEN_PURPOSE_REG(20)
-#define HFI_REG_DEVICE_HWMUTEX_IOVA          GEN_PURPOSE_REG(21)
-#define HFI_REG_DEVICE_HWMUTEX_SIZE          GEN_PURPOSE_REG(22)
+#define HFI_REG_DEVICE_REGION_IOVA           GEN_PURPOSE_REG(19)
+#define HFI_REG_DEVICE_REGION_IOVA_SIZE      GEN_PURPOSE_REG(20)
 
 /* start of Queue table and queues */
 #define MAX_ICP_HFI_QUEUES                      4

+ 39 - 3
drivers/cam_icp/fw_inc/hfi_sys_defs.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _HFI_DEFS_H_
@@ -209,6 +209,7 @@
 #define HFI_PROP_SYS_ICP_HW_FREQUENCY      (HFI_PROPERTY_ICP_COMMON_START + 0xa)
 #define HFI_PROP_SYS_ICP_RAMDUMP_MODE      (HFI_PROPERTY_ICP_COMMON_START + 0xb)
 #define HFI_PROP_SYS_OFE_PC                (HFI_PROPERTY_ICP_COMMON_START + 0xc)
+#define HFI_PROP_SYS_MEM_REGIONS           (HFI_PROPERTY_ICP_COMMON_START + 0xe)
 
 /* Capabilities reported at sys init */
 #define HFI_CAPS_PLACEHOLDER_1         (HFI_COMMON_BASE + 0x1)
@@ -275,6 +276,17 @@
 #define ICP_PWR_CLP_IPE1         0x00020000
 #define ICP_PWR_CLP_OFE          0x00000001
 
+/* New mem region definitions */
+#define HFI_MEM_REGION_ID_IPCLITE_SHARED_MEM      0
+#define HFI_MEM_REGION_ID_SYNX_HW_MUTEX           1
+#define HFI_MEM_REGION_ID_GLOBAL_ATOMIC_HW_MUTEX  2
+#define HFI_MEM_REGION_ID_GLOBAL_CNTR             3
+
+/* Type of the new regions */
+#define HFI_MEM_REGION_TYPE_CACHED       0
+#define HFI_MEM_REGION_TYPE_UNCACHED     1
+#define HFI_MEM_REGION_TYPE_DEVICE       2
+
 /**
  * start of sys command packet types
  * These commands are used to get system level information
@@ -424,13 +436,38 @@ struct hfi_cmd_ping_pkt {
  *             as part of HFI_MSG_SYS_RESET_ACK
  * @HFI_CMD_SYS_RESET
  */
-
 struct hfi_cmd_sys_reset_pkt {
 	uint32_t size;
 	uint32_t pkt_type;
 	uint64_t user_data;
 } __packed;
 
+/**
+ * struct hfi_cmd_mem_region_info
+ * Payload structure to configure HFI_PROP_SYS_MEM_REGIONS
+ * @region_id: region id (HW mutex/synx global mem/...)
+ * @region_type: Type of the region (cached/uncached/device/..)
+ * @start_addr: iova (device virtual address) of the start of this region
+ * @size: size of this region
+ */
+struct hfi_cmd_mem_region_info {
+	uint32_t region_id;
+	uint32_t region_type;
+	uint64_t start_addr;
+	uint32_t size;
+} __packed;
+
+/**
+ * struct hfi_cmd_config_mem_regions
+ * Payload structure to configure HFI_PROP_SYS_MEM_REGIONS
+ * @num_valid_regions: Valid number of regions configured to FW
+ * @region_info: Region specific info
+ */
+struct hfi_cmd_config_mem_regions {
+	uint32_t num_valid_regions;
+	struct hfi_cmd_mem_region_info region_info[1];
+} __packed;
+
 /* end of sys command packet types */
 
 /* start of sys message packet types */
@@ -615,5 +652,4 @@ struct hfi_msg_event_notify {
 /**
  * end of sys message packet types
  */
-
 #endif /* _HFI_DEFS_H_ */

+ 14 - 29
drivers/cam_icp/hfi.c

@@ -848,15 +848,10 @@ int cam_hfi_resume(int client_handle)
 	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len,
 		icp_base + HFI_REG_FWUNCACHED_REGION_SIZE);
 
-	cam_io_w_mb((uint32_t)hfi_mem->global_sync.iova,
-		icp_base + HFI_REG_FWUNCACHED_GLOBAL_SYNC_IOVA);
-	cam_io_w_mb((uint32_t)hfi_mem->global_sync.len,
-		icp_base + HFI_REG_FWUNCACHED_GLOBAL_SYNC_LEN);
-
-	cam_io_w_mb((uint32_t)hfi_mem->hwmutex.iova,
-		icp_base + HFI_REG_DEVICE_HWMUTEX_IOVA);
-	cam_io_w_mb((uint32_t)hfi_mem->hwmutex.len,
-		icp_base + HFI_REG_DEVICE_HWMUTEX_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->device_mem.iova,
+		icp_base + HFI_REG_DEVICE_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->device_mem.len,
+		icp_base + HFI_REG_DEVICE_REGION_IOVA_SIZE);
 
 	CAM_DBG(CAM_HFI, "IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]",
 		hfi_mem->io_mem.iova, hfi_mem->io_mem.len,
@@ -870,13 +865,10 @@ int cam_hfi_resume(int client_handle)
 		hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len,
 		hfi_mem->qdss.iova, hfi_mem->qdss.len);
 
-	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x]",
+	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x] Device [0x%x 0x%x]",
 		hfi_mem->qtbl.iova, hfi_mem->qtbl.len,
-		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len);
-
-	CAM_DBG(CAM_HFI, "global sync : [0x%x 0x%x] hwmutex [0x%x 0x%x]",
-		hfi_mem->global_sync.iova, hfi_mem->global_sync.len,
-		hfi_mem->hwmutex.iova, hfi_mem->hwmutex.len);
+		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len,
+		hfi_mem->device_mem.iova, hfi_mem->device_mem.len);
 
 	return rc;
 }
@@ -1067,14 +1059,10 @@ int cam_hfi_init(int client_handle, struct hfi_mem_info *hfi_mem,
 		icp_base + HFI_REG_FWUNCACHED_REGION_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len,
 		icp_base + HFI_REG_FWUNCACHED_REGION_SIZE);
-	cam_io_w_mb((uint32_t)hfi_mem->global_sync.iova,
-		icp_base + HFI_REG_FWUNCACHED_GLOBAL_SYNC_IOVA);
-	cam_io_w_mb((uint32_t)hfi_mem->global_sync.len,
-		icp_base + HFI_REG_FWUNCACHED_GLOBAL_SYNC_LEN);
-	cam_io_w_mb((uint32_t)hfi_mem->hwmutex.iova,
-		icp_base + HFI_REG_DEVICE_HWMUTEX_IOVA);
-	cam_io_w_mb((uint32_t)hfi_mem->hwmutex.len,
-		icp_base + HFI_REG_DEVICE_HWMUTEX_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->device_mem.iova,
+		icp_base + HFI_REG_DEVICE_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->device_mem.len,
+		icp_base + HFI_REG_DEVICE_REGION_IOVA_SIZE);
 
 	CAM_DBG(CAM_HFI, "[%s] HFI handle: %d",
 		hfi->client_name, client_handle);
@@ -1091,13 +1079,10 @@ int cam_hfi_init(int client_handle, struct hfi_mem_info *hfi_mem,
 		hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len,
 		hfi_mem->qdss.iova, hfi_mem->qdss.len);
 
-	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x]",
+	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x] Device [0x%x 0x%x]",
 		hfi_mem->qtbl.iova, hfi_mem->qtbl.len,
-		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len);
-
-	CAM_DBG(CAM_HFI, "global sync : [0x%x 0x%x] hwmutex [0x%x 0x%x]",
-		hfi_mem->global_sync.iova, hfi_mem->global_sync.len,
-		hfi_mem->hwmutex.iova, hfi_mem->hwmutex.len);
+		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len,
+		hfi_mem->device_mem.iova, hfi_mem->device_mem.len);
 
 	if (cam_presil_mode_enabled())
 		cam_hfi_presil_setup(hfi_mem);

+ 227 - 17
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c

@@ -55,6 +55,13 @@
 /* Memory required to setup all HFI queues and sec heap */
 #define ICP_HFI_QUEUES_MEM_SIZE 0x700000
 
+/*
+ * If synx fencing is enabled, send FW memory mapping
+ * for synx hw_mutex, ipc hw_mutex, synx global mem
+ * and global cntr for qtimer
+ */
+#define ICP_NUM_MEM_REGIONS_FOR_SYNX 4
+
 DECLARE_RWSEM(frame_in_process_sem);
 
 static struct cam_icp_hw_mgr *g_icp_hw_mgr[CAM_ICP_SUBDEV_MAX];
@@ -3282,12 +3289,16 @@ static void cam_icp_free_hfi_mem(struct cam_icp_hw_mgr *hw_mgr)
 
 	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_QDSS, 0);
 
-	/* Skip freeing if not allocated */
+	/* Skip freeing if not mapped */
 	if (hw_mgr->synx_signaling_en) {
 		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_FWUNCACHED,
 			CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM);
 		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
 			CAM_SMMU_SUBREGION_SYNX_HWMUTEX);
+		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
+			CAM_SMMU_SUBREGION_IPC_HWMUTEX);
+		cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl, CAM_SMMU_REGION_DEVICE,
+			CAM_SMMU_SUBREGION_GLOBAL_CNTR);
 	}
 }
 
@@ -3451,12 +3462,13 @@ static int cam_icp_allocate_global_sync_mem(struct cam_icp_hw_mgr *hw_mgr)
 	hw_mgr->hfi_mem.fw_uncached_global_sync.smmu_hdl = hw_mgr->iommu_hdl;
 
 	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
-		iova, len, hw_mgr->hw_mgr_name);
+		hw_mgr->hw_mgr_name, iova, len);
 
 	return rc;
 }
 
-static int cam_icp_allocate_device_hwmutex_mem(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_allocate_device_synx_hwmutex_mem(
+	struct cam_icp_hw_mgr *hw_mgr)
 {
 	int rc;
 	size_t len;
@@ -3470,34 +3482,113 @@ static int cam_icp_allocate_device_hwmutex_mem(struct cam_icp_hw_mgr *hw_mgr)
 		return rc;
 	}
 
-	hw_mgr->hfi_mem.hwmutex.len = len;
-	hw_mgr->hfi_mem.hwmutex.iova = iova;
-	hw_mgr->hfi_mem.hwmutex.smmu_hdl = hw_mgr->iommu_hdl;
+	hw_mgr->hfi_mem.synx_hwmutex.len = len;
+	hw_mgr->hfi_mem.synx_hwmutex.iova = iova;
+	hw_mgr->hfi_mem.synx_hwmutex.smmu_hdl = hw_mgr->iommu_hdl;
 
 	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
-		iova, len, hw_mgr->hw_mgr_name);
+		hw_mgr->hw_mgr_name, iova, len);
 
 	return rc;
 }
 
-static int cam_icp_allocate_mem_for_fence_signaling(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_allocate_device_global_cnt_mem(
+	struct cam_icp_hw_mgr *hw_mgr)
 {
 	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl,
+		CAM_SMMU_REGION_DEVICE, CAM_SMMU_SUBREGION_GLOBAL_CNTR,
+		&iova, &len);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"Failed in allocating global cntr mem rc %d", rc);
+		return rc;
+	}
+
+	hw_mgr->hfi_mem.global_cntr.len = len;
+	hw_mgr->hfi_mem.global_cntr.iova = iova;
+	hw_mgr->hfi_mem.global_cntr.smmu_hdl = hw_mgr->iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
+		hw_mgr->hw_mgr_name, iova, len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_device_ipc_hwmutex_mem(
+	struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_map_phy_mem_region(hw_mgr->iommu_hdl,
+		CAM_SMMU_REGION_DEVICE, CAM_SMMU_SUBREGION_IPC_HWMUTEX,
+		&iova, &len);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"Failed in allocating hwmutex mem rc %d", rc);
+		return rc;
+	}
+
+	hw_mgr->hfi_mem.ipc_hwmutex.len = len;
+	hw_mgr->hfi_mem.ipc_hwmutex.iova = iova;
+	hw_mgr->hfi_mem.ipc_hwmutex.smmu_hdl = hw_mgr->iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "[%s] iova: %llx, len: %zu",
+		hw_mgr->hw_mgr_name, iova, len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_mem_for_fence_signaling(
+	struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+
+	rc = cam_smmu_get_region_info(hw_mgr->iommu_hdl,
+		CAM_SMMU_REGION_DEVICE, &hw_mgr->hfi_mem.device);
+	if (rc) {
+		CAM_ERR(CAM_ICP,
+			"[%s] Unable to get device memory info rc %d",
+			hw_mgr->hw_mgr_name, rc);
+		return rc;
+	}
 
 	rc = cam_icp_allocate_global_sync_mem(hw_mgr);
 	if (rc)
 		return rc;
 
-	rc = cam_icp_allocate_device_hwmutex_mem(hw_mgr);
+	rc = cam_icp_allocate_device_synx_hwmutex_mem(hw_mgr);
 	if (rc)
 		goto unmap_global_sync;
 
+	rc = cam_icp_allocate_device_ipc_hwmutex_mem(hw_mgr);
+	if (rc)
+		goto unmap_synx_hwmutex;
+
+	rc = cam_icp_allocate_device_global_cnt_mem(hw_mgr);
+	if (rc)
+		goto unmap_ipc_mutex;
+
 	return 0;
 
+unmap_ipc_mutex:
+	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
+		CAM_SMMU_REGION_DEVICE,
+		CAM_SMMU_SUBREGION_IPC_HWMUTEX);
+unmap_synx_hwmutex:
+	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
+		CAM_SMMU_REGION_DEVICE,
+		CAM_SMMU_SUBREGION_SYNX_HWMUTEX);
 unmap_global_sync:
 	cam_smmu_unmap_phy_mem_region(hw_mgr->iommu_hdl,
 		CAM_SMMU_REGION_FWUNCACHED,
 		CAM_SMMU_SUBREGION_GLOBAL_SYNC_MEM);
+
 	return rc;
 }
 
@@ -4092,15 +4183,11 @@ static void cam_icp_mgr_populate_hfi_mem_info(struct cam_icp_hw_mgr *hw_mgr,
 	hfi_mem->qdss.len = hw_mgr->hfi_mem.qdss_buf.len;
 
 	if (hw_mgr->synx_signaling_en) {
-		hfi_mem->global_sync.iova = hw_mgr->hfi_mem.fw_uncached_global_sync.iova;
-		hfi_mem->global_sync.len = hw_mgr->hfi_mem.fw_uncached_global_sync.len;
-
-		hfi_mem->hwmutex.iova = hw_mgr->hfi_mem.hwmutex.iova;
-		hfi_mem->hwmutex.len = hw_mgr->hfi_mem.hwmutex.len;
+		hfi_mem->device_mem.iova = hw_mgr->hfi_mem.device.iova_start;
+		hfi_mem->device_mem.len = hw_mgr->hfi_mem.device.iova_len;
 		CAM_DBG(CAM_ICP,
-			"global sync [iova = 0x%llx len = 0x%llx] hwmutex [iova = 0x%llx len = 0x%llx]",
-			hfi_mem->global_sync.iova, hfi_mem->global_sync.len,
-			hfi_mem->hwmutex.iova, hfi_mem->hwmutex.len);
+			"device memory [iova = 0x%llx len = 0x%llx]",
+			hfi_mem->device_mem.iova, hfi_mem->device_mem.len);
 	}
 
 	if (hw_mgr->hfi_mem.io_mem.discard_iova_start &&
@@ -4675,6 +4762,27 @@ static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
 		return rc;
 	}
 
+	if (hw_mgr->synx_signaling_en) {
+		/* Expect global sync to be at the start of FW uncached region */
+		if (hw_mgr->hfi_mem.fw_uncached_global_sync.iova >=
+			hw_mgr->hfi_mem.fw_uncached_generic.iova) {
+			CAM_ERR(CAM_ICP,
+				"global sync memory [start: 0x%x] expected to be at the start of FW uncached [uncached_generic start: 0x%x]",
+				hw_mgr->hfi_mem.fw_uncached_global_sync.iova,
+				hw_mgr->hfi_mem.fw_uncached_generic.iova);
+			return -EINVAL;
+		}
+
+	/*
+	 * Global sync memory is part of the FW uncached region, but remove this
+	 * entry before handing the region to FW, so that it is not mapped along
+	 * with the rest of FW uncached. Global sync mem is instead mapped with
+	 * shareable attributes for IPC access, and hence needs an independent
+	 * mapping of its own.
+	 */
+		hfi_mem.fw_uncached.iova += hw_mgr->hfi_mem.fw_uncached_global_sync.len;
+		hfi_mem.fw_uncached.len -= hw_mgr->hfi_mem.fw_uncached_global_sync.len;
+	}
+
 	rc = cam_hfi_init(hw_mgr->hfi_handle, &hfi_mem, hfi_ops,
 		icp_dev_intf->hw_priv, 0);
 	if (rc) {
@@ -4727,6 +4835,101 @@ static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
 	return rc;
 }
 
+static int cam_icp_mgr_send_memory_region_info(
+	struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct hfi_cmd_prop *set_prop = NULL;
+	struct hfi_cmd_config_mem_regions *region_info = NULL;
+	uint32_t num_regions = 0;
+	size_t payload_size;
+
+	if (hw_mgr->synx_signaling_en)
+		num_regions += ICP_NUM_MEM_REGIONS_FOR_SYNX;
+
+	if (!num_regions)
+		return 0;
+
+	payload_size = sizeof(struct hfi_cmd_prop) +
+		(sizeof(struct hfi_cmd_config_mem_regions)) +
+		(sizeof(struct hfi_cmd_mem_region_info) * (num_regions - 1));
+
+	set_prop = kzalloc(payload_size, GFP_KERNEL);
+	if (!set_prop)
+		return -ENOMEM;
+
+	set_prop->size = payload_size;
+	set_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	set_prop->num_prop = 1;
+	set_prop->prop_data[0] = HFI_PROP_SYS_MEM_REGIONS;
+
+	region_info = (struct hfi_cmd_config_mem_regions *)&set_prop->prop_data[1];
+	if (hw_mgr->synx_signaling_en) {
+		/* Update synx global mem */
+		region_info->region_info[region_info->num_valid_regions].region_id =
+			HFI_MEM_REGION_ID_IPCLITE_SHARED_MEM;
+		region_info->region_info[region_info->num_valid_regions].region_type =
+			HFI_MEM_REGION_TYPE_UNCACHED;
+		region_info->region_info[region_info->num_valid_regions].start_addr =
+			hw_mgr->hfi_mem.fw_uncached_global_sync.iova;
+		region_info->region_info[region_info->num_valid_regions].size =
+			hw_mgr->hfi_mem.fw_uncached_global_sync.len;
+
+		region_info->num_valid_regions++;
+
+		/* Update synx hw_mutex mem */
+		region_info->region_info[region_info->num_valid_regions].region_id =
+			HFI_MEM_REGION_ID_SYNX_HW_MUTEX;
+		region_info->region_info[region_info->num_valid_regions].region_type =
+			HFI_MEM_REGION_TYPE_DEVICE;
+		region_info->region_info[region_info->num_valid_regions].start_addr =
+			hw_mgr->hfi_mem.synx_hwmutex.iova;
+		region_info->region_info[region_info->num_valid_regions].size =
+			hw_mgr->hfi_mem.synx_hwmutex.len;
+
+		region_info->num_valid_regions++;
+
+		/* Update ipc hw_mutex mem */
+		region_info->region_info[region_info->num_valid_regions].region_id =
+			HFI_MEM_REGION_ID_GLOBAL_ATOMIC_HW_MUTEX;
+		region_info->region_info[region_info->num_valid_regions].region_type =
+			HFI_MEM_REGION_TYPE_DEVICE;
+		region_info->region_info[region_info->num_valid_regions].start_addr =
+			hw_mgr->hfi_mem.ipc_hwmutex.iova;
+		region_info->region_info[region_info->num_valid_regions].size =
+			hw_mgr->hfi_mem.ipc_hwmutex.len;
+
+		region_info->num_valid_regions++;
+
+		/* Update global cntr mem */
+		region_info->region_info[region_info->num_valid_regions].region_id =
+			HFI_MEM_REGION_ID_GLOBAL_CNTR;
+		region_info->region_info[region_info->num_valid_regions].region_type =
+			HFI_MEM_REGION_TYPE_DEVICE;
+		region_info->region_info[region_info->num_valid_regions].start_addr =
+			hw_mgr->hfi_mem.global_cntr.iova;
+		region_info->region_info[region_info->num_valid_regions].size =
+			hw_mgr->hfi_mem.global_cntr.len;
+
+		region_info->num_valid_regions++;
+		CAM_DBG(CAM_ICP,
+			"Synx mem regions global_sync[0x%x:0x%x] synx_hw_mutex[0x%x:0x%x] ipc_hw_mutex[0x%x:0x%x] global_cntr[0x%x:0x%x]",
+			hw_mgr->hfi_mem.fw_uncached_global_sync.iova,
+			hw_mgr->hfi_mem.fw_uncached_global_sync.len,
+			hw_mgr->hfi_mem.synx_hwmutex.iova, hw_mgr->hfi_mem.synx_hwmutex.len,
+			hw_mgr->hfi_mem.ipc_hwmutex.iova, hw_mgr->hfi_mem.ipc_hwmutex.len,
+			hw_mgr->hfi_mem.global_cntr.iova, hw_mgr->hfi_mem.global_cntr.len);
+	}
+
+	CAM_DBG(CAM_ICP,
+		"Mem region property payload size: %zu num_regions: %u",
+		payload_size, region_info->num_valid_regions);
+
+	hfi_write_cmd(hw_mgr->hfi_handle, set_prop);
+	kfree(set_prop);
+
+	return 0;
+}
+
 static int cam_icp_mgr_hw_open_u(void *hw_mgr_priv, void *download_fw_args)
 {
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
@@ -4881,6 +5084,13 @@ static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
 		goto fw_init_failed;
 	}
 
+	rc = cam_icp_mgr_send_memory_region_info(hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "[%s] Failed in sending mem region info, rc %d",
+			hw_mgr->hw_mgr_name, rc);
+		goto fw_init_failed;
+	}
+
 	hw_mgr->ctxt_cnt = 0;
 	hw_mgr->icp_booted = true;
 	atomic_set(&hw_mgr->recovery, 0);

+ 8 - 2
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h

@@ -107,10 +107,13 @@ struct hfi_mini_dump_info;
  * @sfr_buf: Memory info for sfr buffer
  * @fw_uncached_generic: Memory info for fw uncached region
  * @fw_uncached_global_sync: Memory info for global sync, in fw uncached region
- * @hwmutex: Memory info for hwmutex region mapped as device memory
+ * @synx_hwmutex: Memory info for synx hwmutex region mapped as device memory
+ * @ipc_hwmutex: Memory info for ipc hwmutex region mapped as device memory
+ * @global_cntr: Memory info for global cntr region mapped as device memory
  * @shmem: Memory info for shared region
  * @io_mem: Memory info for io region
  * @fw_uncached: Memory info for fw uncached nested region
+ * @device: Memory info for the device region
  * @fw_uncached_region: region support for fw uncached
  */
 struct icp_hfi_mem_info {
@@ -124,10 +127,13 @@ struct icp_hfi_mem_info {
 	struct cam_mem_mgr_memory_desc sfr_buf;
 	struct cam_mem_mgr_memory_desc fw_uncached_generic;
 	struct cam_mem_mgr_memory_desc fw_uncached_global_sync;
-	struct cam_mem_mgr_memory_desc hwmutex;
+	struct cam_mem_mgr_memory_desc synx_hwmutex;
+	struct cam_mem_mgr_memory_desc ipc_hwmutex;
+	struct cam_mem_mgr_memory_desc global_cntr;
 	struct cam_smmu_region_info shmem;
 	struct cam_smmu_region_info io_mem;
 	struct cam_smmu_region_info fw_uncached;
+	struct cam_smmu_region_info device;
 	bool fw_uncached_region;
 };