msm: camera: icp: Add fw uncached region support in icp smmu

Use the fw_uncached region instead of the secondary heap region and
pass this region's information to FW through CSR registers. Allocate
the Qtbl, cmd_q, msg_q, dbg_q and sfr buffers from the fw_uncached
region instead of the shared mem region, and allocate the sec heap
from the fw_uncached region as well instead of its own dedicated
region.

CRs-Fixed: 2722486
Change-Id: Ib88b2202ca1b610946c712fcca936b72d4eecd15
Signed-off-by: Pavan Kumar Chilamkurthi <[email protected]>
Signed-off-by: Karthik Anantha Ram <[email protected]>
Pavan Kumar Chilamkurthi, 4 years ago
commit fa9be8c725
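
For orientation, this change backs six HFI buffers with one dma_buf reserved
against the FW-uncached carveout. Below is a minimal sketch of the resulting
layout, with hypothetical macro names; the sizes are the ones hard-coded in
cam_icp_allocate_hfi_mem() in the hw_mgr diff:

/*
 * Illustrative only: sub-buffers carved back-to-back, from offset 0, out of
 * the single CAM_SMMU_REGION_FWUNCACHED reservation (total 5 MB + 8 KB).
 */
#define ICP_FWUNCACHED_SEC_HEAP_SIZE  SZ_1M  /* secondary heap           */
#define ICP_FWUNCACHED_QTBL_SIZE      SZ_1M  /* HFI queue table          */
#define ICP_FWUNCACHED_CMD_Q_SIZE     SZ_1M  /* host-to-FW command queue */
#define ICP_FWUNCACHED_MSG_Q_SIZE     SZ_1M  /* FW-to-host message queue */
#define ICP_FWUNCACHED_DBG_Q_SIZE     SZ_1M  /* FW debug queue           */
#define ICP_FWUNCACHED_SFR_SIZE       SZ_8K  /* subsystem failure reason */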

drivers/cam_icp/fw_inc/hfi_intf.h (+3, -1)

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _HFI_INTF_H_
@@ -33,6 +33,7 @@ struct hfi_mem {
  * @qdss: qdss mapped memory for fw
  * @io_mem: io memory info
  * @io_mem2: 2nd io memory info
+ * @fw_uncached: FW uncached region info
  */
 struct hfi_mem_info {
 	struct hfi_mem qtbl;
@@ -45,6 +46,7 @@ struct hfi_mem_info {
 	struct hfi_mem qdss;
 	struct hfi_mem io_mem;
 	struct hfi_mem io_mem2;
+	struct hfi_mem fw_uncached;
 };
 
 /**

drivers/cam_icp/fw_inc/hfi_reg.h (+5, -3)

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_HFI_REG_H_
@@ -18,8 +18,8 @@
 #define HFI_REG_SHARED_MEM_PTR          GEN_PURPOSE_REG(4)
 #define HFI_REG_SHARED_MEM_SIZE         GEN_PURPOSE_REG(5)
 #define HFI_REG_QTBL_PTR                GEN_PURPOSE_REG(6)
-#define HFI_REG_UNCACHED_HEAP_PTR       GEN_PURPOSE_REG(7)
-#define HFI_REG_UNCACHED_HEAP_SIZE      GEN_PURPOSE_REG(8)
+#define HFI_REG_SECONDARY_HEAP_PTR      GEN_PURPOSE_REG(7)
+#define HFI_REG_SECONDARY_HEAP_SIZE     GEN_PURPOSE_REG(8)
 #define HFI_REG_SFR_PTR                 GEN_PURPOSE_REG(10)
 #define HFI_REG_QDSS_IOVA               GEN_PURPOSE_REG(11)
 #define HFI_REG_QDSS_IOVA_SIZE          GEN_PURPOSE_REG(12)
@@ -27,6 +27,8 @@
 #define HFI_REG_IO_REGION_SIZE          GEN_PURPOSE_REG(14)
 #define HFI_REG_IO2_REGION_IOVA         GEN_PURPOSE_REG(15)
 #define HFI_REG_IO2_REGION_SIZE         GEN_PURPOSE_REG(16)
+#define HFI_REG_FWUNCACHED_REGION_IOVA  GEN_PURPOSE_REG(17)
+#define HFI_REG_FWUNCACHED_REGION_SIZE  GEN_PURPOSE_REG(18)
 
 /* start of Queue table and queues */
 #define MAX_ICP_HFI_QUEUES                      4

drivers/cam_icp/hfi.c (+43, -9)

@@ -623,9 +623,9 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem)
 	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
 		icp_base + HFI_REG_SHARED_MEM_SIZE);
 	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
-		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+		icp_base + HFI_REG_SECONDARY_HEAP_PTR);
 	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
-		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+		icp_base + HFI_REG_SECONDARY_HEAP_SIZE);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
 		icp_base + HFI_REG_QDSS_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
@@ -640,10 +640,27 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem)
 	cam_io_w_mb((uint32_t)hfi_mem->io_mem2.len,
 		icp_base + HFI_REG_IO2_REGION_SIZE);
 
-	CAM_INFO(CAM_HFI, "Resume IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]",
+	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.iova,
+		icp_base + HFI_REG_FWUNCACHED_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len,
+		icp_base + HFI_REG_FWUNCACHED_REGION_SIZE);
+
+	CAM_DBG(CAM_HFI, "IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]",
 		hfi_mem->io_mem.iova, hfi_mem->io_mem.len,
 		hfi_mem->io_mem2.iova, hfi_mem->io_mem2.len);
 
+	CAM_DBG(CAM_HFI, "FwUncached : [0x%x 0x%x] Shared [0x%x 0x%x]",
+		hfi_mem->fw_uncached.iova, hfi_mem->fw_uncached.len,
+		hfi_mem->shmem.iova, hfi_mem->shmem.len);
+
+	CAM_DBG(CAM_HFI, "SecHeap : [0x%x 0x%x] QDSS [0x%x 0x%x]",
+		hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len,
+		hfi_mem->qdss.iova, hfi_mem->qdss.len);
+
+	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x]",
+		hfi_mem->qtbl.iova, hfi_mem->qtbl.len,
+		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len);
+
 	return rc;
 }
 
@@ -818,9 +835,9 @@ int cam_hfi_init(struct hfi_mem_info *hfi_mem, const struct hfi_ops *hfi_ops,
 	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
 		icp_base + HFI_REG_SHARED_MEM_SIZE);
 	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
-		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+		icp_base + HFI_REG_SECONDARY_HEAP_PTR);
 	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
-		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+		icp_base + HFI_REG_SECONDARY_HEAP_SIZE);
 	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
 		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
@@ -835,15 +852,32 @@ int cam_hfi_init(struct hfi_mem_info *hfi_mem, const struct hfi_ops *hfi_ops,
 		icp_base + HFI_REG_IO2_REGION_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->io_mem2.len,
 		icp_base + HFI_REG_IO2_REGION_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.iova,
+		icp_base + HFI_REG_FWUNCACHED_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->fw_uncached.len,
+		icp_base + HFI_REG_FWUNCACHED_REGION_SIZE);
 
-	CAM_INFO(CAM_HFI, "Init IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]",
+	CAM_DBG(CAM_HFI, "IO1 : [0x%x 0x%x] IO2 [0x%x 0x%x]",
 		hfi_mem->io_mem.iova, hfi_mem->io_mem.len,
 		hfi_mem->io_mem2.iova, hfi_mem->io_mem2.len);
 
+	CAM_DBG(CAM_HFI, "FwUncached : [0x%x 0x%x] Shared [0x%x 0x%x]",
+		hfi_mem->fw_uncached.iova, hfi_mem->fw_uncached.len,
+		hfi_mem->shmem.iova, hfi_mem->shmem.len);
+
+	CAM_DBG(CAM_HFI, "SecHeap : [0x%x 0x%x] QDSS [0x%x 0x%x]",
+		hfi_mem->sec_heap.iova, hfi_mem->sec_heap.len,
+		hfi_mem->qdss.iova, hfi_mem->qdss.len);
+
+	CAM_DBG(CAM_HFI, "QTbl : [0x%x 0x%x] Sfr [0x%x 0x%x]",
+		hfi_mem->qtbl.iova, hfi_mem->qtbl.len,
+		hfi_mem->sfr_buf.iova, hfi_mem->sfr_buf.len);
+
 	if (readl_poll_timeout(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE,
-			       status, status == ICP_INIT_RESP_SUCCESS,
-			       HFI_POLL_DELAY_US, HFI_POLL_TIMEOUT_US)) {
-		CAM_ERR(CAM_HFI, "response poll timed out: status=0x%08x",
+			status, status == ICP_INIT_RESP_SUCCESS,
+			HFI_POLL_DELAY_US, HFI_POLL_TIMEOUT_US)) {
+		CAM_ERR(CAM_HFI,
+			"response poll timed out: status=0x%08x",
 			status);
 		rc = -ETIMEDOUT;
 		goto regions_fail;

drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c (+199, -38)

@@ -2728,18 +2728,37 @@ static void cam_icp_free_fw_mem(void)
 static void cam_icp_free_hfi_mem(void)
 {
 	int rc;
+	struct cam_smmu_region_info fwuncached_region_info;
+	bool fwuncached_region_exists = false;
 
 	cam_icp_free_fw_mem();
-	rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
-	if (rc)
-		CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_FWUNCACHED,
+		&fwuncached_region_info);
+	if (!rc)
+		fwuncached_region_exists = true;
+
+	if (fwuncached_region_exists) {
+		rc = cam_mem_mgr_free_memory_region(
+			&icp_hw_mgr.hfi_mem.fw_uncached);
+		if (rc)
+			CAM_ERR(CAM_ICP,
+				"failed to unreserve fwuncached region");
+	} else {
+		rc = cam_mem_mgr_free_memory_region(
+			&icp_hw_mgr.hfi_mem.sec_heap);
+		if (rc)
+			CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+
+		cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+		cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+		cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+		cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+		cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+	}
 
 	cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
-	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
-	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
-	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
-	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
-	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
 }
 
 static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
@@ -2899,6 +2918,8 @@ static int cam_icp_get_io_mem_info(void)
 static int cam_icp_allocate_hfi_mem(void)
 {
 	int rc;
+	struct cam_smmu_region_info fwuncached_region_info = {0};
+	bool fwuncached_region_exists = false;
 
 	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
 		CAM_SMMU_REGION_SHARED,
@@ -2920,46 +2941,180 @@ static int cam_icp_allocate_hfi_mem(void)
 		goto fw_alloc_failed;
 	}
 
-	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
-		goto qtbl_alloc_failed;
-	}
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_FWUNCACHED,
+		&fwuncached_region_info);
+	if (!rc)
+		fwuncached_region_exists = true;
+
+	if (fwuncached_region_exists) {
+		struct cam_mem_mgr_request_desc alloc;
+		struct cam_mem_mgr_memory_desc out;
+		uint32_t offset;
+		uint64_t size;
+
+		memset(&alloc, 0, sizeof(alloc));
+		memset(&out, 0, sizeof(out));
+
+		alloc.size = fwuncached_region_info.iova_len;
+		alloc.align = 0;
+		alloc.flags = CAM_MEM_FLAG_KMD_ACCESS;
+		alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+		rc = cam_mem_mgr_reserve_memory_region(&alloc,
+			CAM_SMMU_REGION_FWUNCACHED,
+			&out);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to reserve fw uncached memory");
+			goto qtbl_alloc_failed;
+		}
 
-	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
-		goto cmd_q_alloc_failed;
-	}
+		icp_hw_mgr.hfi_mem.fw_uncached = out;
+
+		offset = 0;
+
+		size = SZ_1M;
+		icp_hw_mgr.hfi_mem.sec_heap.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.sec_heap.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.sec_heap.len        = size;
+		icp_hw_mgr.hfi_mem.sec_heap.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.sec_heap.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.sec_heap.region     = out.region;
+		offset += (uint32_t)size;
+
+		size = SZ_1M;
+		icp_hw_mgr.hfi_mem.qtbl.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.qtbl.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.qtbl.len        = size;
+		icp_hw_mgr.hfi_mem.qtbl.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.qtbl.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.qtbl.region     = out.region;
+		offset += (uint32_t)size;
+
+		size = SZ_1M;
+		icp_hw_mgr.hfi_mem.cmd_q.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.cmd_q.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.cmd_q.len        = size;
+		icp_hw_mgr.hfi_mem.cmd_q.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.cmd_q.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.cmd_q.region     = out.region;
+		offset += (uint32_t)size;
+
+		size = SZ_1M;
+		icp_hw_mgr.hfi_mem.msg_q.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.msg_q.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.msg_q.len        = size;
+		icp_hw_mgr.hfi_mem.msg_q.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.msg_q.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.msg_q.region     = out.region;
+		offset += (uint32_t)size;
+
+		size = SZ_1M;
+		icp_hw_mgr.hfi_mem.dbg_q.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.dbg_q.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.dbg_q.len        = size;
+		icp_hw_mgr.hfi_mem.dbg_q.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.dbg_q.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.dbg_q.region     = out.region;
+		offset += (uint32_t)size;
+
+		size = SZ_8K;
+		icp_hw_mgr.hfi_mem.sfr_buf.iova       = out.iova + offset;
+		icp_hw_mgr.hfi_mem.sfr_buf.kva        = out.kva + offset;
+		icp_hw_mgr.hfi_mem.sfr_buf.len        = size;
+		icp_hw_mgr.hfi_mem.sfr_buf.smmu_hdl   = out.smmu_hdl;
+		icp_hw_mgr.hfi_mem.sfr_buf.mem_handle = out.mem_handle;
+		icp_hw_mgr.hfi_mem.sfr_buf.region     = out.region;
+		offset += (uint32_t)size;
+
+		if (offset > out.len) {
+			CAM_ERR(CAM_ICP,
+				"FW uncached region size %lld not enough, required %u",
+				out.len, offset);
+			rc = -EINVAL;
+			cam_mem_mgr_free_memory_region(
+				&icp_hw_mgr.hfi_mem.fw_uncached);
+			goto qtbl_alloc_failed;
+		}
+	} else {
+		rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
+			goto qtbl_alloc_failed;
+		}
 
-	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
-		goto msg_q_alloc_failed;
-	}
+		rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
+			goto cmd_q_alloc_failed;
+		}
 
-	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
-		goto dbg_q_alloc_failed;
-	}
+		rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
+			goto msg_q_alloc_failed;
+		}
 
-	rc = cam_icp_alloc_sfr_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate sfr buffer");
-		goto sfr_buf_alloc_failed;
-	}
+		rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
+			goto dbg_q_alloc_failed;
+		}
 
-	rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
-		goto sec_heap_alloc_failed;
+		rc = cam_icp_alloc_sfr_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate sfr buffer");
+			goto sfr_buf_alloc_failed;
+		}
+
+		rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
+			goto sec_heap_alloc_failed;
+		}
 	}
 
+	CAM_DBG(CAM_ICP, "Shared Region [0x%x %lld] FW Uncached Region [0x%x %lld]",
+		icp_hw_mgr.hfi_mem.shmem.iova_start,
+		icp_hw_mgr.hfi_mem.shmem.iova_len,
+		fwuncached_region_info.iova_start,
+		fwuncached_region_info.iova_len);
+
+	CAM_DBG(CAM_ICP,
+		"FwUncached[0x%x %p %lld] QTbl[0x%x %p %lld] CmdQ[0x%x %p %lld] MsgQ[0x%x %p %lld]",
+		icp_hw_mgr.hfi_mem.fw_uncached.iova,
+		icp_hw_mgr.hfi_mem.fw_uncached.kva,
+		icp_hw_mgr.hfi_mem.fw_uncached.len,
+		icp_hw_mgr.hfi_mem.qtbl.iova,
+		icp_hw_mgr.hfi_mem.qtbl.kva,
+		icp_hw_mgr.hfi_mem.qtbl.len,
+		icp_hw_mgr.hfi_mem.cmd_q.iova,
+		icp_hw_mgr.hfi_mem.cmd_q.kva,
+		icp_hw_mgr.hfi_mem.cmd_q.len,
+		icp_hw_mgr.hfi_mem.msg_q.iova,
+		icp_hw_mgr.hfi_mem.msg_q.kva,
+		icp_hw_mgr.hfi_mem.msg_q.len);
+
+	CAM_DBG(CAM_ICP,
+		"DbgQ[0x%x %p %lld] SFR[0x%x %p %lld] SecHeap[0x%x %p %lld]",
+		icp_hw_mgr.hfi_mem.dbg_q.iova,
+		icp_hw_mgr.hfi_mem.dbg_q.kva,
+		icp_hw_mgr.hfi_mem.dbg_q.len,
+		icp_hw_mgr.hfi_mem.sfr_buf.iova,
+		icp_hw_mgr.hfi_mem.sfr_buf.kva,
+		icp_hw_mgr.hfi_mem.sfr_buf.len,
+		icp_hw_mgr.hfi_mem.sec_heap.iova,
+		icp_hw_mgr.hfi_mem.sec_heap.kva,
+		icp_hw_mgr.hfi_mem.sec_heap.len);
+
 	rc = cam_icp_get_io_mem_info();
 	if (rc) {
 		CAM_ERR(CAM_ICP, "Unable to get I/O region info");
-		goto get_io_mem_failed;
+		if (fwuncached_region_exists) {
+			cam_mem_mgr_free_memory_region(
+				&icp_hw_mgr.hfi_mem.fw_uncached);
+			goto qtbl_alloc_failed;
+		} else {
+			goto get_io_mem_failed;
+		}
 	}
 
 	return rc;
@@ -3331,6 +3486,9 @@ static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
 		hfi_mem.io_mem2.len = 0x0;
 	}
 
+	hfi_mem.fw_uncached.iova = icp_hw_mgr.hfi_mem.fw_uncached.iova;
+	hfi_mem.fw_uncached.len = icp_hw_mgr.hfi_mem.fw_uncached.len;
+
 	CAM_DBG(CAM_ICP,
 		"IO region1 IOVA = %X length = %lld, IO region2 IOVA = %X length = %lld",
 		hfi_mem.io_mem.iova,
@@ -3761,6 +3919,9 @@ static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
 		hfi_mem.io_mem2.len = 0x0;
 	}
 
+	hfi_mem.fw_uncached.iova = icp_hw_mgr.hfi_mem.fw_uncached.iova;
+	hfi_mem.fw_uncached.len = icp_hw_mgr.hfi_mem.fw_uncached.len;
+
 	if (icp_dev_intf->hw_type == CAM_ICP_DEV_LX7)
 		hfi_ops = &hfi_lx7_ops;
 	else

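The six near-identical sub-buffer assignments in cam_icp_allocate_hfi_mem()
above could collapse into one helper. A sketch under the assumption that the
descriptor fields shown in the hunk are the complete set to copy; the helper
name is illustrative, not part of this change:

static uint32_t cam_icp_carve_sub_buf(struct cam_mem_mgr_memory_desc *dst,
	const struct cam_mem_mgr_memory_desc *src,
	uint32_t offset, uint64_t size)
{
	/* Point dst at [offset, offset + size) inside the parent buffer. */
	dst->iova       = src->iova + offset;
	dst->kva        = src->kva + offset;
	dst->len        = size;
	dst->smmu_hdl   = src->smmu_hdl;
	dst->mem_handle = src->mem_handle;
	dst->region     = src->region;

	return offset + (uint32_t)size;
}

Each carve block in the hunk would then reduce to a single call, e.g.:

	offset = cam_icp_carve_sub_buf(&icp_hw_mgr.hfi_mem.qtbl, &out,
		offset, SZ_1M);
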
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h (+2, -0)

@@ -83,6 +83,7 @@
  * @fw_buf: Memory info of firmware
  * @qdss_buf: Memory info of qdss
  * @sfr_buf: Memory info for sfr buffer
+ * @fw_uncached: Memory info for fw uncached region
  * @shmem: Memory info for shared region
  * @io_mem: Memory info for io region
  */
@@ -95,6 +96,7 @@ struct icp_hfi_mem_info {
 	struct cam_mem_mgr_memory_desc fw_buf;
 	struct cam_mem_mgr_memory_desc qdss_buf;
 	struct cam_mem_mgr_memory_desc sfr_buf;
+	struct cam_mem_mgr_memory_desc fw_uncached;
 	struct cam_smmu_region_info shmem;
 	struct cam_smmu_region_info io_mem;
 };

drivers/cam_req_mgr/cam_mem_mgr.c (+23, -11)

@@ -909,7 +909,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 			region = CAM_SMMU_REGION_SHARED;
 
 		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			region = CAM_SMMU_REGION_SECHEAP;
+			region = CAM_SMMU_REGION_IO;
 
 		rc = cam_mem_util_map_hw_va(cmd->flags,
 			cmd->mmu_hdls,
@@ -1567,6 +1567,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	int32_t idx;
 	int32_t smmu_hdl = 0;
 	int32_t num_hdl = 0;
+	uintptr_t kvaddr = 0;
 
 	if (!atomic_read(&cam_mem_mgr_state)) {
 		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1583,7 +1584,8 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 		return -EINVAL;
 	}
 
-	if (region != CAM_SMMU_REGION_SECHEAP) {
-		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+	if ((region != CAM_SMMU_REGION_SECHEAP) &&
+		(region != CAM_SMMU_REGION_FWUNCACHED)) {
+		CAM_ERR(CAM_MEM,
+			"Only secondary heap and fw uncached region supported");
 		return -EINVAL;
 	}
@@ -1599,10 +1601,16 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
 	}
 
-	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
-		buf,
-		&iova,
-		&request_len);
+	if (inp->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
+		if (rc) {
+			CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
+			goto kmap_fail;
+		}
+	}
+
+	rc = cam_smmu_reserve_buf_region(region,
+		inp->smmu_hdl, buf, &iova, &request_len);
 
 	if (rc) {
-		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
+		CAM_ERR(CAM_MEM, "Reserving region %d failed", region);
@@ -1625,7 +1633,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].dma_buf = buf;
 	tbl.bufq[idx].flags = inp->flags;
 	tbl.bufq[idx].buf_handle = mem_handle;
-	tbl.bufq[idx].kmdvaddr = 0;
+	tbl.bufq[idx].kmdvaddr = kvaddr;
 
 	tbl.bufq[idx].vaddr = iova;
 
@@ -1636,7 +1644,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].is_imported = false;
 	mutex_unlock(&tbl.bufq[idx].q_lock);
 
-	out->kva = 0;
+	out->kva = kvaddr;
 	out->iova = (uint32_t)iova;
 	out->smmu_hdl = smmu_hdl;
 	out->mem_handle = mem_handle;
@@ -1646,8 +1654,11 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	return rc;
 
 slot_fail:
-	cam_smmu_release_sec_heap(smmu_hdl);
+	cam_smmu_release_buf_region(region, smmu_hdl);
 smmu_fail:
+	if (kvaddr)
+		cam_mem_util_unmap_cpu_va(buf, kvaddr);
+kmap_fail:
 	dma_buf_put(buf);
 ion_fail:
 	return rc;
@@ -1670,7 +1681,8 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
 		return -EINVAL;
 	}
 
-	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
-		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+	if ((inp->region != CAM_SMMU_REGION_SECHEAP) &&
+		(inp->region != CAM_SMMU_REGION_FWUNCACHED)) {
+		CAM_ERR(CAM_MEM,
+			"Only secondary heap and fw uncached region supported");
 		return -EINVAL;
 	}
@@ -1710,7 +1722,7 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
 		return -ENODEV;
 	}
 
-	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
+	rc = cam_smmu_release_buf_region(inp->region, inp->smmu_hdl);
 	if (rc) {
-		CAM_ERR(CAM_MEM,
-			"Sec heap region release failed");
+		CAM_ERR(CAM_MEM, "Region %d release failed",
+			inp->region);

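Combined with the hw_mgr hunk earlier, the generalized reserve path is now
driven roughly as below: a hedged caller-side sketch (region_info and
iommu_hdl are assumed to be in scope), showing that CAM_MEM_FLAG_KMD_ACCESS
now yields a kernel mapping in out.kva:

	struct cam_mem_mgr_request_desc alloc = {
		.size     = region_info.iova_len, /* span the whole carveout */
		.align    = 0,
		.smmu_hdl = iommu_hdl,
		.flags    = CAM_MEM_FLAG_KMD_ACCESS,
	};
	struct cam_mem_mgr_memory_desc out = {0};

	if (!cam_mem_mgr_reserve_memory_region(&alloc,
		CAM_SMMU_REGION_FWUNCACHED, &out))
		memset((void *)out.kva, 0, out.len); /* kva valid: KMD_ACCESS */
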
drivers/cam_smmu/cam_smmu_api.c (+139, -66)

@@ -99,7 +99,7 @@ struct scratch_mapping {
 	dma_addr_t base;
 };
 
-struct secheap_buf_info {
+struct region_buf_info {
 	struct dma_buf *buf;
 	struct dma_buf_attachment *attach;
 	struct sg_table *table;
@@ -128,10 +128,12 @@ struct cam_context_bank_info {
 	uint8_t shared_support;
 	uint8_t io_support;
 	uint8_t secheap_support;
+	uint8_t fwuncached_region_support;
 	uint8_t qdss_support;
 	dma_addr_t qdss_phy_addr;
 	bool is_fw_allocated;
 	bool is_secheap_allocated;
+	bool is_fwuncached_buf_allocated;
 	bool is_qdss_allocated;
 
 	struct scratch_mapping scratch_map;
@@ -142,8 +144,10 @@ struct cam_context_bank_info {
 	struct cam_smmu_region_info shared_info;
 	struct cam_smmu_region_info io_info;
 	struct cam_smmu_region_info secheap_info;
+	struct cam_smmu_region_info fwuncached_region;
 	struct cam_smmu_region_info qdss_info;
-	struct secheap_buf_info secheap_buf;
+	struct region_buf_info secheap_buf;
+	struct region_buf_info fwuncached_reg_buf;
 
 	struct list_head smmu_buf_list;
 	struct list_head smmu_buf_kernel_list;
@@ -1833,6 +1837,15 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
 		region_info->iova_start = cb->secheap_info.iova_start;
 		region_info->iova_len = cb->secheap_info.iova_len;
 		break;
+	case CAM_SMMU_REGION_FWUNCACHED:
+		if (!cb->fwuncached_region_support) {
+			CAM_WARN(CAM_SMMU, "FW uncached region not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->fwuncached_region.iova_start;
+		region_info->iova_len = cb->fwuncached_region.iova_len;
+		break;
 	default:
 		CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
 			smmu_hdl, region_id);
@@ -1845,15 +1858,18 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
 }
 EXPORT_SYMBOL(cam_smmu_get_region_info);
 
-int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region,
+	int32_t smmu_hdl,
 	struct dma_buf *buf,
 	dma_addr_t *iova,
 	size_t *request_len)
 {
-	struct secheap_buf_info *secheap_buf = NULL;
+	struct cam_context_bank_info *cb_info;
+	struct region_buf_info *buf_info = NULL;
+	struct cam_smmu_region_info *region_info = NULL;
+	bool *is_buf_allocated;
+	bool region_supported;
 	size_t size = 0;
-	uint32_t sec_heap_iova = 0;
-	size_t sec_heap_iova_len = 0;
 	int idx;
 	int rc = 0;
 	int prot = 0;
@@ -1866,17 +1882,46 @@ int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
 		return -EINVAL;
 	}
 
-	if (!iommu_cb_set.cb_info[idx].secheap_support) {
-		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+	cb_info = &iommu_cb_set.cb_info[idx];
+
+	if (region == CAM_SMMU_REGION_SECHEAP) {
+		region_supported = cb_info->secheap_support;
+	} else if (region == CAM_SMMU_REGION_FWUNCACHED) {
+		region_supported = cb_info->fwuncached_region_support;
+	} else {
+		CAM_ERR(CAM_SMMU, "Region not supported for reserving %d",
+			region);
 		return -EINVAL;
 	}
 
-	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (!region_supported) {
+		CAM_ERR(CAM_SMMU, "Reserve for region %d not supported",
+			region);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cb_info->lock);
 
-	if (iommu_cb_set.cb_info[idx].is_secheap_allocated) {
-		CAM_ERR(CAM_SMMU, "Trying to allocate secheap twice");
+	if (region == CAM_SMMU_REGION_SECHEAP) {
+		is_buf_allocated = &cb_info->is_secheap_allocated;
+		buf_info = &cb_info->secheap_buf;
+		region_info = &cb_info->secheap_info;
+	} else if (region == CAM_SMMU_REGION_FWUNCACHED) {
+		is_buf_allocated = &cb_info->is_fwuncached_buf_allocated;
+		buf_info = &cb_info->fwuncached_reg_buf;
+		region_info = &cb_info->fwuncached_region;
+	} else {
+		CAM_ERR(CAM_SMMU, "Region not supported for reserving %d",
+			region);
+		mutex_unlock(&cb_info->lock);
+		return -EINVAL;
+	}
+
+	if (*is_buf_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate heap twice for region %d",
+			region);
 		rc = -ENOMEM;
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		mutex_unlock(&cb_info->lock);
 		return rc;
 	}
 
@@ -1887,72 +1932,71 @@ int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
 		goto err_out;
 	}
 
-	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
-	secheap_buf->buf = buf;
-	secheap_buf->attach = dma_buf_attach(secheap_buf->buf,
-		iommu_cb_set.cb_info[idx].dev);
-	if (IS_ERR_OR_NULL(secheap_buf->attach)) {
-		rc = PTR_ERR(secheap_buf->attach);
+	buf_info->buf = buf;
+	buf_info->attach = dma_buf_attach(buf_info->buf,
+		cb_info->dev);
+	if (IS_ERR_OR_NULL(buf_info->attach)) {
+		rc = PTR_ERR(buf_info->attach);
 		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
 		goto err_put;
 	}
 
-	secheap_buf->table = dma_buf_map_attachment(secheap_buf->attach,
+	buf_info->table = dma_buf_map_attachment(buf_info->attach,
 		DMA_BIDIRECTIONAL);
-	if (IS_ERR_OR_NULL(secheap_buf->table)) {
-		rc = PTR_ERR(secheap_buf->table);
+	if (IS_ERR_OR_NULL(buf_info->table)) {
+		rc = PTR_ERR(buf_info->table);
 		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
 		goto err_detach;
 	}
 
-	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
-	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
-
 	prot = IOMMU_READ | IOMMU_WRITE;
 	if (iommu_cb_set.force_cache_allocs)
 		prot |= IOMMU_CACHE;
 
-	size = iommu_map_sg(iommu_cb_set.cb_info[idx].domain,
-		sec_heap_iova,
-		secheap_buf->table->sgl,
-		secheap_buf->table->orig_nents,
+	size = iommu_map_sg(cb_info->domain,
+		region_info->iova_start,
+		buf_info->table->sgl,
+		buf_info->table->orig_nents,
 		prot);
-	if (size != sec_heap_iova_len) {
+	if (size != region_info->iova_len) {
 		CAM_ERR(CAM_SMMU,
-			"IOMMU mapping failed size=%zu, sec_heap_iova_len=%zu",
-			size, sec_heap_iova_len);
+			"IOMMU mapping failed size=%zu, iova_len=%zu",
+			size, region_info->iova_len);
 		goto err_unmap_sg;
 	}
 
-	iommu_cb_set.cb_info[idx].is_secheap_allocated = true;
-	*iova = (uint32_t)sec_heap_iova;
-	*request_len = sec_heap_iova_len;
-	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	*is_buf_allocated = true;
+	*iova = (uint32_t)region_info->iova_start;
+	*request_len = region_info->iova_len;
+	mutex_unlock(&cb_info->lock);
 
 	return rc;
 
 err_unmap_sg:
-	dma_buf_unmap_attachment(secheap_buf->attach,
-		secheap_buf->table,
+	dma_buf_unmap_attachment(buf_info->attach,
+		buf_info->table,
 		DMA_BIDIRECTIONAL);
 err_detach:
-	dma_buf_detach(secheap_buf->buf,
-		secheap_buf->attach);
+	dma_buf_detach(buf_info->buf,
+		buf_info->attach);
 err_put:
-	dma_buf_put(secheap_buf->buf);
+	dma_buf_put(buf_info->buf);
 err_out:
-	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	mutex_unlock(&cb_info->lock);
 	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_reserve_sec_heap);
+EXPORT_SYMBOL(cam_smmu_reserve_buf_region);
 
-int cam_smmu_release_sec_heap(int32_t smmu_hdl)
+int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
+	int32_t smmu_hdl)
 {
 	int idx;
 	size_t size = 0;
-	uint32_t sec_heap_iova = 0;
-	size_t sec_heap_iova_len = 0;
-	struct secheap_buf_info *secheap_buf = NULL;
+	struct region_buf_info *buf_info = NULL;
+	struct cam_context_bank_info *cb_info;
+	bool *is_buf_allocated;
+	bool region_supported;
+	struct cam_smmu_region_info *region_info = NULL;
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
@@ -1962,41 +2006,64 @@ int cam_smmu_release_sec_heap(int32_t smmu_hdl)
 		return -EINVAL;
 	}
 
-	if (!iommu_cb_set.cb_info[idx].secheap_support) {
+	cb_info = &iommu_cb_set.cb_info[idx];
+
+	if (region == CAM_SMMU_REGION_SECHEAP) {
+		region_supported = cb_info->secheap_support;
+	} else if (region == CAM_SMMU_REGION_FWUNCACHED) {
+		region_supported = cb_info->fwuncached_region_support;
+	} else {
+		CAM_ERR(CAM_SMMU, "Region not supported for releasing %d",
+			region);
+		return -EINVAL;
+	}
+
+	if (!region_supported) {
-		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+		CAM_ERR(CAM_SMMU, "Release for region %d not supported",
+			region);
 		return -EINVAL;
 	}
-	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	mutex_lock(&cb_info->lock);
+
+	if (region == CAM_SMMU_REGION_SECHEAP) {
+		is_buf_allocated = &cb_info->is_secheap_allocated;
+		buf_info = &cb_info->secheap_buf;
+		region_info = &cb_info->secheap_info;
+	} else if (region == CAM_SMMU_REGION_FWUNCACHED) {
+		is_buf_allocated = &cb_info->is_fwuncached_buf_allocated;
+		buf_info = &cb_info->fwuncached_reg_buf;
+		region_info = &cb_info->fwuncached_region;
+	} else {
+		CAM_ERR(CAM_SMMU, "Region not supported for releasing %d",
+			region);
+		mutex_unlock(&cb_info->lock);
+		return -EINVAL;
+	}
 
-	if (!iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+	if (!(*is_buf_allocated)) {
-		CAM_ERR(CAM_SMMU, "Trying to release secheap twice");
+		CAM_ERR(CAM_SMMU, "Trying to release region %d twice",
+			region);
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		mutex_unlock(&cb_info->lock);
 		return -ENOMEM;
 	}
 
-	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
-	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
-	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
-
-	size = iommu_unmap(iommu_cb_set.cb_info[idx].domain,
-		sec_heap_iova,
-		sec_heap_iova_len);
-	if (size != sec_heap_iova_len) {
+	size = iommu_unmap(cb_info->domain,
+		region_info->iova_start,
+		region_info->iova_len);
+	if (size != region_info->iova_len) {
 		CAM_ERR(CAM_SMMU, "Failed: Unmapped = %zu, requested = %zu",
 			size,
-			sec_heap_iova_len);
+			region_info->iova_len);
 	}
 
-	dma_buf_unmap_attachment(secheap_buf->attach,
-		secheap_buf->table, DMA_BIDIRECTIONAL);
-	dma_buf_detach(secheap_buf->buf, secheap_buf->attach);
-	dma_buf_put(secheap_buf->buf);
-	iommu_cb_set.cb_info[idx].is_secheap_allocated = false;
-	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	dma_buf_unmap_attachment(buf_info->attach,
+		buf_info->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(buf_info->buf, buf_info->attach);
+	dma_buf_put(buf_info->buf);
+	*is_buf_allocated = false;
+	mutex_unlock(&cb_info->lock);
 
 	return 0;
 }
-EXPORT_SYMBOL(cam_smmu_release_sec_heap);
+EXPORT_SYMBOL(cam_smmu_release_buf_region);
 
 static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
@@ -3660,6 +3727,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
 	cb->dev = dev;
 	cb->is_fw_allocated = false;
 	cb->is_secheap_allocated = false;
+	cb->is_fwuncached_buf_allocated = false;
 
 	atomic64_set(&cb->monitor_head, -1);
 
@@ -3943,6 +4011,11 @@ static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
 			cb->secheap_info.iova_start = region_start;
 			cb->secheap_info.iova_len = region_len;
 			break;
+		case CAM_SMMU_REGION_FWUNCACHED:
+			cb->fwuncached_region_support = 1;
+			cb->fwuncached_region.iova_start = region_start;
+			cb->fwuncached_region.iova_len = region_len;
+			break;
 		case CAM_SMMU_REGION_QDSS:
 			cb->qdss_support = 1;
 			cb->qdss_info.iova_start = region_start;

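Whether a context bank's devicetree iova map describes the new region can be
probed at runtime, which is exactly how the ICP hw_mgr picks between the two
allocation paths. A minimal sketch (the helper name is hypothetical):

static bool cam_icp_fw_uncached_region_exists(int32_t iommu_hdl)
{
	struct cam_smmu_region_info info = {0};

	/*
	 * cam_smmu_get_region_info() returns -ENODEV when the context
	 * bank does not advertise a FWUNCACHED iova region.
	 */
	return !cam_smmu_get_region_info(iommu_hdl,
		CAM_SMMU_REGION_FWUNCACHED, &info);
}
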
drivers/cam_smmu/cam_smmu_api.h (+11, -8)

@@ -39,7 +39,8 @@ enum cam_smmu_region_id {
 	CAM_SMMU_REGION_SCRATCH,
 	CAM_SMMU_REGION_IO,
 	CAM_SMMU_REGION_SECHEAP,
-	CAM_SMMU_REGION_QDSS
+	CAM_SMMU_REGION_QDSS,
+	CAM_SMMU_REGION_FWUNCACHED,
 };
 
 /**
@@ -354,8 +355,9 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
 	struct cam_smmu_region_info *region_info);
 
 /**
- * @brief Reserves secondary heap
+ * @brief Reserves an IOVA region by mapping the provided buffer into it
  *
+ * @param region: Region id
  * @param smmu_hdl: SMMU handle identifying the context bank
- * @param iova: IOVA of secondary heap after reservation has completed
- * @param buf: Allocated dma_buf for secondary heap
+ * @param buf: Allocated dma_buf backing the region
+ * @param iova: IOVA of the region after reservation has completed
@@ -363,19 +365,20 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
-	struct dma_buf *buf,
-	dma_addr_t *iova,
-	size_t *request_len);
+int cam_smmu_reserve_buf_region(enum cam_smmu_region_id region,
+	int32_t smmu_hdl, struct dma_buf *buf,
+	dma_addr_t *iova, size_t *request_len);
 
 /**
- * @brief Releases secondary heap
+ * @brief Releases buffer in reserved region
  *
+ * @param region: Region id
  * @param smmu_hdl: SMMU handle identifying the context bank
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_release_sec_heap(int32_t smmu_hdl);
+int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
+	int32_t smmu_hdl);
 
 /**
  * @brief Allocates qdss for context bank