فهرست منبع

FF: Upto snap463: 'quic/camera-kernel.lnx.4.0' into 5.0 12/10/20

* quic/camera-kernel.lnx.4.0:
  msm: camera: cpas: Add hw version checks for Yupik camera
  msm: camera: cpas: Add support for Yupik camnoc
  msm: camera: isp: Remove WARN_ON() from error case in buf done
  msm: camera: icp: Fetch the firmware name from soc info
  msm: camera: cdm: Improve error handling during cdm hang
  msm: camera: cci: Fix logic to update cci clk freq
  msm: camera: isp: Transition to HALT state prior to HW stop
  msm: camera: isp: Print the expected height & width
  msm: camera: ope: Check array size of input sync obj
  msm: camera: isp: Add null check for vfe out res data
  msm: camera: isp: Update err type when notifying to HW MGR
  msm: camera: ife: Handle vfe out array properly
  msm: camera: ope: free unused memory in ope acquire
  msm: camera: isp: reduce bufdone error logs
  msm: camera: ope: Avoid submitting NULL request to CDM
  msm: camera: isp: Choose appropriate WM for frame header
  msm: camera: reqmgr: check applied req id for sync mode
  msm: camera: common: Improve IFE and CRM Logging
  msm: camera: smmu: Add support to force all CACHED

Change-Id: I589164549bc930986c1f5bb968d15e3714f03870
Signed-off-by: Abhijit Trivedi <[email protected]>
Abhijit Trivedi 4 سال پیش
والد
کامیت
5418bcb5d6
37 فایل‌های تغییر یافته به همراه 1361 افزوده شده و 373 حذف شده
  1. 9 0
      Makefile
  2. 18 0
      config/yupikcamera.conf
  3. 11 0
      config/yupikcameraconf.h
  4. 1 0
      drivers/cam_cdm/cam_cdm.h
  5. 62 59
      drivers/cam_cdm/cam_cdm_hw_core.c
  6. 17 0
      drivers/cam_cpas/cpas_top/cam_cpastop_hw.c
  7. 3 0
      drivers/cam_cpas/cpas_top/cam_cpastop_hw.h
  8. 605 0
      drivers/cam_cpas/cpas_top/cpastop_v165_100.h
  9. 3 0
      drivers/cam_cpas/include/cam_cpas_api.h
  10. 16 4
      drivers/cam_icp/icp_hw/a5_hw/a5_core.c
  11. 2 1
      drivers/cam_icp/icp_hw/a5_hw/a5_soc.c
  12. 1 1
      drivers/cam_icp/icp_hw/a5_hw/a5_soc.h
  13. 32 13
      drivers/cam_isp/cam_isp_context.c
  14. 2 0
      drivers/cam_isp/cam_isp_context.h
  15. 45 20
      drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
  16. 11 6
      drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
  17. 37 18
      drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
  18. 2 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
  19. 1 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
  20. 0 15
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
  21. 0 15
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680.h
  22. 4 1
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
  23. 144 64
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
  24. 2 1
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
  25. 2 1
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
  26. 2 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
  27. 2 1
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
  28. 51 32
      drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c
  29. 12 4
      drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c
  30. 11 0
      drivers/cam_req_mgr/cam_mem_mgr.c
  31. 2 0
      drivers/cam_req_mgr/cam_mem_mgr.h
  32. 20 6
      drivers/cam_req_mgr/cam_req_mgr_core.c
  33. 51 104
      drivers/cam_sensor_module/cam_cci/cam_cci_core.c
  34. 1 2
      drivers/cam_sensor_module/cam_cci/cam_cci_dev.h
  35. 2 2
      drivers/cam_sensor_module/cam_cci/cam_cci_soc.c
  36. 171 3
      drivers/cam_smmu/cam_smmu_api.c
  37. 6 0
      drivers/cam_smmu/cam_smmu_api.h

+ 9 - 0
Makefile

@@ -26,6 +26,10 @@ ifeq ($(CONFIG_ARCH_SHIMA), y)
 include $(srctree)/techpack/camera/config/shimacamera.conf
 endif
 
+ifeq ($(CONFIG_ARCH_YUPIK), y)
+include $(srctree)/techpack/camera/config/yupikcamera.conf
+endif
+
 ifeq ($(CONFIG_ARCH_KONA), y)
 LINUXINCLUDE    += \
 		-include $(srctree)/techpack/camera/config/konacameraconf.h
@@ -56,6 +60,11 @@ LINUXINCLUDE    += \
 		-include $(srctree)/techpack/camera/config/shimacameraconf.h
 endif
 
+ifeq ($(CONFIG_ARCH_YUPIK), y)
+LINUXINCLUDE    += \
+		-include $(srctree)/techpack/camera/config/yupikcameraconf.h
+endif
+
 endif
 
 ifneq (,$(filter $(CONFIG_SPECTRA_CAMERA), y m))

+ 18 - 0
config/yupikcamera.conf

@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2020, The Linux Foundation. All rights reserved.
+
+ifeq ($(CONFIG_QGKI),y)
+export CONFIG_SPECTRA_CAMERA=m
+$(info "SPECTRA_CAMERA IS MODULAR")
+else
+$(info "SPECTRA_CAMERA IS MODULAR")
+export CONFIG_SPECTRA_CAMERA=m
+endif
+
+ifneq (,$(filter $(CONFIG_SPECTRA_CAMERA), y m))
+export CONFIG_SPECTRA_ISP=y
+export CONFIG_SPECTRA_SENSOR=y
+export CONFIG_SPECTRA_ICP=y
+export CONFIG_SPECTRA_JPEG=y
+export CONFIG_SPECTRA_LRME=y
+endif

+ 11 - 0
config/yupikcameraconf.h

@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_SPECTRA_CAMERA 1
+#define CONFIG_SPECTRA_ISP    1
+#define CONFIG_SPECTRA_SENSOR 1
+#define CONFIG_SPECTRA_ICP    1
+#define CONFIG_SPECTRA_JPEG   1
+#define CONFIG_SPECTRA_LRME   1

+ 1 - 0
drivers/cam_cdm/cam_cdm.h

@@ -83,6 +83,7 @@
 #define CAM_CDM_RESET_HW_STATUS 0x4
 #define CAM_CDM_ERROR_HW_STATUS 0x5
 #define CAM_CDM_FLUSH_HW_STATUS 0x6
+#define CAM_CDM_RESET_ERR_STATUS 0x7
 
 /* Curent BL command masks and shifts */
 #define CAM_CDM_CURRENT_BL_LEN   0xFFFFF

+ 62 - 59
drivers/cam_cdm/cam_cdm_hw_core.c

@@ -297,15 +297,15 @@ static void cam_hw_cdm_dump_bl_fifo_data(struct cam_hw_info *cdm_hw)
 void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw,
 	bool pause_core)
 {
-	uint32_t dump_reg, core_dbg = 0x100;
+	uint32_t dump_reg[4], core_dbg = 0x100;
 	int i;
 	bool is_core_paused_already;
 	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
 	const struct cam_cdm_icl_regs *inv_cmd_log =
 		core->offsets->cmn_reg->icl_reg;
 
-	cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en, &dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW core status=0x%x", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->core_en,
+		&dump_reg[0]);
 
 	if (pause_core) {
 		cam_hw_cdm_pause_core(cdm_hw, true);
@@ -314,23 +314,24 @@ void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw,
 	cam_hw_cdm_enable_core_dbg(cdm_hw, core_dbg);
 
 	cam_cdm_read_hw_reg(cdm_hw, core->offsets->cmn_reg->usr_data,
-		&dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW core userdata=0x%x", dump_reg);
+		&dump_reg[1]);
 
 	cam_cdm_read_hw_reg(cdm_hw,
 		core->offsets->cmn_reg->debug_status,
-		&dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW Debug status reg=0x%x", dump_reg);
+		&dump_reg[2]);
+
+	CAM_INFO(CAM_CDM, "Core stat 0x%x udata 0x%x dbg_stat 0x%x",
+			dump_reg[0], dump_reg[1], dump_reg[2]);
 
 	if (core_dbg & 0x100) {
 		cam_cdm_read_hw_reg(cdm_hw,
 			core->offsets->cmn_reg->last_ahb_addr,
-			&dump_reg);
-		CAM_INFO(CAM_CDM, "AHB dump reglastaddr=0x%x", dump_reg);
+			&dump_reg[0]);
 		cam_cdm_read_hw_reg(cdm_hw,
 			core->offsets->cmn_reg->last_ahb_data,
-			&dump_reg);
-		CAM_INFO(CAM_CDM, "AHB dump reglastdata=0x%x", dump_reg);
+			&dump_reg[1]);
+		CAM_INFO(CAM_CDM, "AHB dump lastaddr=0x%x lastdata=0x%x",
+			dump_reg[0], dump_reg[1]);
 	} else {
 		CAM_INFO(CAM_CDM, "CDM HW AHB dump not enable");
 	}
@@ -339,46 +340,41 @@ void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw,
 		if (inv_cmd_log->misc_regs) {
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->misc_regs->icl_status,
-				&dump_reg);
-			CAM_INFO(CAM_CDM,
-				"Last Inv Cmd Log(ICL)Status: 0x%x",
-				dump_reg);
+				&dump_reg[0]);
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->misc_regs->icl_inv_bl_addr,
-				&dump_reg);
+				&dump_reg[1]);
 			CAM_INFO(CAM_CDM,
-				"Last Inv bl_addr: 0x%x",
-				dump_reg);
+				"Last Inv Cmd Log(ICL)Status: 0x%x bl_addr: 0x%x",
+				dump_reg[0], dump_reg[1]);
 		}
 		if (inv_cmd_log->data_regs) {
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->data_regs->icl_inv_data,
-				&dump_reg);
+				&dump_reg[0]);
 			CAM_INFO(CAM_CDM, "First word of Last Inv cmd: 0x%x",
-				dump_reg);
+				dump_reg[0]);
 
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->data_regs->icl_last_data_0,
-				&dump_reg);
-			CAM_INFO(CAM_CDM, "First word of Last Good cmd: 0x%x",
-				dump_reg);
+				&dump_reg[0]);
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->data_regs->icl_last_data_1,
-				&dump_reg);
-			CAM_INFO(CAM_CDM, "Second word of Last Good cmd: 0x%x",
-				dump_reg);
+				&dump_reg[1]);
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->data_regs->icl_last_data_2,
-				&dump_reg);
-			CAM_INFO(CAM_CDM, "Third word of Last Good cmd: 0x%x",
-				dump_reg);
+				&dump_reg[2]);
+
+
+			CAM_INFO(CAM_CDM, "Last good cmd word 0x%x 0x%x 0x%x",
+					dump_reg[0], dump_reg[1], dump_reg[2]);
 		}
 	}
 
 	if (core_dbg & 0x10000) {
 		cam_cdm_read_hw_reg(cdm_hw,
-			core->offsets->cmn_reg->core_en, &dump_reg);
-		is_core_paused_already = (bool)(dump_reg & 0x20);
+			core->offsets->cmn_reg->core_en, &dump_reg[0]);
+		is_core_paused_already = (bool)(dump_reg[0] & 0x20);
 		if (!is_core_paused_already) {
 			cam_hw_cdm_pause_core(cdm_hw, true);
 			usleep_range(1000, 1010);
@@ -390,49 +386,44 @@ void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw,
 			cam_hw_cdm_pause_core(cdm_hw, false);
 	}
 
-	CAM_INFO(CAM_CDM, "CDM HW default dump");
 	cam_cdm_read_hw_reg(cdm_hw,
-		core->offsets->cmn_reg->core_cfg, &dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW core cfg=0x%x", dump_reg);
+		core->offsets->cmn_reg->core_cfg, &dump_reg[0]);
+	CAM_INFO(CAM_CDM, "CDM HW core cfg=0x%x", dump_reg[0]);
 
 	for (i = 0; i < core->offsets->reg_data->num_bl_fifo_irq; i++) {
 		cam_cdm_read_hw_reg(cdm_hw,
-			core->offsets->irq_reg[i]->irq_status, &dump_reg);
-		CAM_INFO(CAM_CDM, "CDM HW irq status%d=0x%x", i, dump_reg);
-
+			core->offsets->irq_reg[i]->irq_status, &dump_reg[0]);
 		cam_cdm_read_hw_reg(cdm_hw,
-			core->offsets->irq_reg[i]->irq_set, &dump_reg);
-		CAM_INFO(CAM_CDM, "CDM HW irq set%d=0x%x", i, dump_reg);
-
+			core->offsets->irq_reg[i]->irq_set, &dump_reg[1]);
 		cam_cdm_read_hw_reg(cdm_hw,
-			core->offsets->irq_reg[i]->irq_mask, &dump_reg);
-		CAM_INFO(CAM_CDM, "CDM HW irq mask%d=0x%x", i, dump_reg);
-
+			core->offsets->irq_reg[i]->irq_mask, &dump_reg[2]);
 		cam_cdm_read_hw_reg(cdm_hw,
-			core->offsets->irq_reg[i]->irq_clear, &dump_reg);
-		CAM_INFO(CAM_CDM, "CDM HW irq clear%d=0x%x", i, dump_reg);
+			core->offsets->irq_reg[i]->irq_clear, &dump_reg[3]);
+
+		CAM_INFO(CAM_CDM,
+			"cnt %d irq status 0x%x set 0x%x mask 0x%x clear 0x%x",
+			i, dump_reg[0], dump_reg[1], dump_reg[2], dump_reg[3]);
 	}
 
 	cam_cdm_read_hw_reg(cdm_hw,
-		core->offsets->cmn_reg->current_bl_base, &dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW current BL base=0x%x", dump_reg);
+		core->offsets->cmn_reg->current_bl_base, &dump_reg[0]);
+	cam_cdm_read_hw_reg(cdm_hw,
+		core->offsets->cmn_reg->current_used_ahb_base, &dump_reg[1]);
+	CAM_INFO(CAM_CDM, "curr BL base 0x%x AHB base 0x%x",
+		dump_reg[0], dump_reg[1]);
 
 	cam_cdm_read_hw_reg(cdm_hw,
-		core->offsets->cmn_reg->current_bl_len, &dump_reg);
+		core->offsets->cmn_reg->current_bl_len, &dump_reg[0]);
 	CAM_INFO(CAM_CDM,
 		"CDM HW current BL len=%d ARB %d FIFO %d tag=%d, ",
-		(dump_reg & CAM_CDM_CURRENT_BL_LEN),
-		(dump_reg & CAM_CDM_CURRENT_BL_ARB) >>
+		(dump_reg[0] & CAM_CDM_CURRENT_BL_LEN),
+		(dump_reg[0] & CAM_CDM_CURRENT_BL_ARB) >>
 			CAM_CDM_CURRENT_BL_ARB_SHIFT,
-		(dump_reg & CAM_CDM_CURRENT_BL_FIFO) >>
+		(dump_reg[0] & CAM_CDM_CURRENT_BL_FIFO) >>
 			CAM_CDM_CURRENT_BL_FIFO_SHIFT,
-		(dump_reg & CAM_CDM_CURRENT_BL_TAG) >>
+		(dump_reg[0] & CAM_CDM_CURRENT_BL_TAG) >>
 			CAM_CDM_CURRENT_BL_TAG_SHIFT);
 
-	cam_cdm_read_hw_reg(cdm_hw,
-		core->offsets->cmn_reg->current_used_ahb_base, &dump_reg);
-	CAM_INFO(CAM_CDM, "CDM HW current AHB base=0x%x", dump_reg);
-
 	cam_hw_cdm_disable_core_dbg(cdm_hw);
 	if (pause_core)
 		cam_hw_cdm_pause_core(cdm_hw, false);
@@ -1128,11 +1119,15 @@ static void cam_hw_cdm_reset_cleanup(
 	int i;
 	struct cam_cdm_bl_cb_request_entry *node, *tnode;
 	bool flush_hw = false;
+	bool reset_err = false;
 
 	if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
 		test_bit(CAM_CDM_FLUSH_HW_STATUS, &core->cdm_status))
 		flush_hw = true;
 
+	if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status))
+		reset_err = true;
+
 	for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) {
 		list_for_each_entry_safe(node, tnode,
 			&core->bl_fifo[i].bl_request_list, entry) {
@@ -1141,12 +1136,19 @@ static void cam_hw_cdm_reset_cleanup(
 				CAM_DBG(CAM_CDM,
 					"Notifying client %d for tag %d",
 					node->client_hdl, node->bl_tag);
-				if (flush_hw)
+				if (flush_hw) {
+					enum cam_cdm_cb_status status;
+
+					status = reset_err ?
+						CAM_CDM_CB_STATUS_HW_ERROR :
+						CAM_CDM_CB_STATUS_HW_RESUBMIT;
+
 					cam_cdm_notify_clients(cdm_hw,
 						(node->client_hdl == handle) ?
 						CAM_CDM_CB_STATUS_HW_FLUSH :
-						CAM_CDM_CB_STATUS_HW_RESUBMIT,
+						status,
 						(void *)node);
+				}
 				else
 					cam_cdm_notify_clients(cdm_hw,
 						CAM_CDM_CB_STATUS_HW_RESET_DONE,
@@ -1695,7 +1697,7 @@ int cam_hw_cdm_handle_error_info(
 	if (time_left <= 0) {
 		rc = -ETIMEDOUT;
 		CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
-		goto end;
+		set_bit(CAM_CDM_RESET_ERR_STATUS, &cdm_core->cdm_status);
 	}
 
 	rc = cam_hw_cdm_set_cdm_core_cfg(cdm_hw);
@@ -1734,6 +1736,7 @@ int cam_hw_cdm_handle_error_info(
 end:
 	clear_bit(CAM_CDM_FLUSH_HW_STATUS, &cdm_core->cdm_status);
 	clear_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
+	clear_bit(CAM_CDM_RESET_ERR_STATUS, &cdm_core->cdm_status);
 	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
 		mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
 

+ 17 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.c

@@ -30,6 +30,7 @@
 #include "cpastop_v545_100.h"
 #include "cpastop_v570_200.h"
 #include "cpastop_v680_100.h"
+#include "cpastop_v165_100.h"
 #include "cam_req_mgr_workq.h"
 
 struct cam_camnoc_info *camnoc_info;
@@ -130,6 +131,15 @@ static const uint32_t cam_cpas_hw_version_map
 		0,
 		CAM_CPAS_TITAN_570_V200,
 	},
+	/* for camera_165 */
+	{
+		CAM_CPAS_TITAN_165_V100,
+		0,
+		0,
+		0,
+		0,
+		0,
+	},
 };
 
 static int cam_cpas_translate_camera_cpas_version_id(
@@ -181,6 +191,10 @@ static int cam_cpas_translate_camera_cpas_version_id(
 		*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_680;
 		break;
 
+	case CAM_CPAS_CAMERA_VERSION_165:
+		*cam_version_id = CAM_CPAS_CAMERA_VERSION_ID_165;
+		break;
+
 	default:
 		CAM_ERR(CAM_CPAS, "Invalid cam version %u",
 			cam_version);
@@ -857,6 +871,9 @@ static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
 	case CAM_CPAS_TITAN_680_V100:
 		camnoc_info = &cam680_cpas100_camnoc_info;
 		break;
+	case CAM_CPAS_TITAN_165_V100:
+		camnoc_info = &cam165_cpas100_camnoc_info;
+		break;
 	default:
 		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
 			hw_caps->camera_version.major,

+ 3 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.h

@@ -191,6 +191,9 @@ enum cam_camnoc_port_type {
 	CAM_CAMNOC_TFE_1,
 	CAM_CAMNOC_TFE_2,
 	CAM_CAMNOC_OPE,
+	CAM_CAMNOC_IFE01234_RDI_WRITE,
+	CAM_CAMNOC_IFE01_NRDI_WRITE,
+	CAM_CAMNOC_IFE2_NRDI_WRITE,
 };
 
 /**

+ 605 - 0
drivers/cam_cpas/cpas_top/cpastop_v165_100.h

@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+ #ifndef _CPASTOP_V165_100_H_
+ #define _CPASTOP_V165_100_H_
+
+ #define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v165_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2240, /* SBM_FAULTINEN0_LOW */
+		.value =  0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2248, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2280, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v165_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x4F08, /* ERL_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x4F10, /* ERL_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x4F18, /* ERL_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE0_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x3BA0, /* SPECIFIC_IFE0_MAIN_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x3B90,
+			/* SPECIFIC_IFE0_MAIN_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x3B98, /* SPECIFIC_IFE0_MAIN_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE1_WRITE_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x55A0, /* SPECIFIC_IFE1_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x5590, /* SPECIFIC_IFE1_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x5598, /* SPECIFIC_IFE1_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2F20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2F10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2F18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2BA0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2B90,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2B98, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = false,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x40, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2288, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2290, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v165_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4230, /* CDM_PRIORITYLUT_LOW */
+			.value = 0x0,
+		},
+		.priority_lut_high = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4234, /* CDM_PRIORITYLUT_HIGH */
+			.value = 0x0,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0, /* CDM_Urgency_Low */
+			.offset = 0x4238,
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4240, /* CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x4248, /* CDM_SAFELUT_LOW */
+			.value = 0xFFFF,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE01234_RDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3630, /* IFE01234_RDI_PRIORITYLUT_LOW */
+			.value = 0x55554433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3634, /* IFE01234_RDI_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3638, /* IFE01234_RDI_LINEAR_URGENCY_LOW */
+			.mask = 0x1FF0,
+			.shift = 0x4,
+			.value = 0x1030,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3640, /* IFE01234_RDI_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3648, /* IFE01234_RDI_SAFELUT_LOW */
+			.value = 0x000F,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE01_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A30, /* IFE01_NRDI_PRIORITYLUT_LOW */
+			.value = 0x55554433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3A34, /* IFE01_NRDI_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x3A38, /* IFE01_NRDI_URGENCY_LOW */
+			/* IFE01_NRDI_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x1FF0,
+			/* IFE01_NRDI_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 0x1030,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A40, /* IFE01_NRDI_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x3A48, /* IFE01_NRDI_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		/* no reg for this */
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x3B88, /* IFE01_NRDI_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE2_NRDI_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5430, /* IFE2_NDRI_PRIORITYLUT_LOW */
+			.value = 0x55554433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			/* IFE2_NRDI_PRIORITYLUT_HIGH */
+			.offset = 0x5434,
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x5438, /* IFE2_NRDI_URGENCY_LOW */
+			/* IFE2_NRDI_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x1FF0,
+			/* IFE2_NRDI_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 0x1030,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5440, /* IFE2_NRDI_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x5448, /* IFE2_NRDI_SAFELUT_LOW */
+			.value = 0xF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x5588, /* IFE2_NRDI_ENCCTL_LOW */
+			.value = 0,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E30, /* IPE_BPS_LRME_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333, /*Value is 0 in excel sheet */
+		},
+		.priority_lut_high = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E34, /* IPE_BPS_LRME_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333, /*Value is 0 in excel sheet */
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2E38, /* IPE_BPS_LRME_RD_URGENCY_LOW */
+			/* IPE_BPS_LRME_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* IPE_BPS_LRME_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 0x3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E40, /* IPE_BPS_LRME_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E48, /* IPE_BPS_LRME_RD_SAFELUT_LOW */
+			.value = 0xFFFF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2F08, /* IPE_BPS_LRME_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A30, /* IPE_BPS_LRME_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A34, /* IPE_BPS_LRME_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2A38, /* IPE_BPS_LRME_WR_URGENCY_LOW */
+			/* IPE_BPS_LRME_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IPE_BPS_LRME_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 0x30,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A40, /* IPE_BPS_LRME_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2A48, /* IPE_BPS_LRME_WR_SAFELUT_LOW */
+			.value = 0xFFFF,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2B88, /* IPE_BPS_LRME_WR_ENCCTL_LOW */
+			.value = 0,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2630, /* JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2634, /* JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2638, /* JPEG_URGENCY_LOW */
+			.mask = 0x3F,
+			.shift = 0x0,
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2640, /* JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2648, /* JPEG_SAFELUT_LOW */
+			.value = 0xFFFF,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		/*SidebandManager_main_SidebandManager_FlagOutSet0_Low*/
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x5088,
+			.value = 0x50,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam165_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x4F08, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x4F10, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x4F20, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x4F24, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x4F28, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x4F2C, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x4F30, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x4F34, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x4F38, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x4F3C, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam165_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2300, /* sbm_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam165_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v165_100_camnoc_specific[0],
+	.specific_size = ARRAY_SIZE(cam_cpas_v165_100_camnoc_specific),
+	.irq_sbm = &cam_cpas_v165_100_irq_sbm,
+	.irq_err = &cam_cpas_v165_100_irq_err[0],
+	.irq_err_size = ARRAY_SIZE(cam_cpas_v165_100_irq_err),
+	.err_logger = &cam165_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam165_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V165_100_H_ */
+

+ 3 - 0
drivers/cam_cpas/include/cam_cpas_api.h

@@ -68,6 +68,7 @@ enum cam_cpas_camera_version {
 	CAM_CPAS_CAMERA_VERSION_545  = 0x00050405,
 	CAM_CPAS_CAMERA_VERSION_570  = 0x00050700,
 	CAM_CPAS_CAMERA_VERSION_680  = 0x00060800,
+	CAM_CPAS_CAMERA_VERSION_165  = 0x00010605,
 	CAM_CPAS_CAMERA_VERSION_MAX
 };
 
@@ -100,6 +101,7 @@ enum cam_cpas_camera_version_map_id {
 	CAM_CPAS_CAMERA_VERSION_ID_545  = 0x7,
 	CAM_CPAS_CAMERA_VERSION_ID_570  = 0x8,
 	CAM_CPAS_CAMERA_VERSION_ID_680  = 0x9,
+	CAM_CPAS_CAMERA_VERSION_ID_165  = 0xA,
 	CAM_CPAS_CAMERA_VERSION_ID_MAX
 };
 
@@ -123,6 +125,7 @@ enum cam_cpas_version_map_id {
 enum cam_cpas_hw_version {
 	CAM_CPAS_TITAN_NONE = 0,
 	CAM_CPAS_TITAN_150_V100 = 0x150100,
+	CAM_CPAS_TITAN_165_V100 = 0x165100,
 	CAM_CPAS_TITAN_170_V100 = 0x170100,
 	CAM_CPAS_TITAN_170_V110 = 0x170110,
 	CAM_CPAS_TITAN_170_V120 = 0x170120,

+ 16 - 4
drivers/cam_icp/icp_hw/a5_hw/a5_core.c

@@ -199,10 +199,22 @@ static int32_t cam_a5_download_fw(void *device_priv)
 	pdev = soc_info->pdev;
 	cam_a5_soc_info = soc_info->soc_private;
 
-	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
-	if (rc) {
-		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
-		return rc;
+	if (cam_a5_soc_info->fw_name) {
+		CAM_INFO(CAM_ICP, "Downloading firmware %s",
+			cam_a5_soc_info->fw_name);
+		rc = request_firmware(&core_info->fw_elf,
+				cam_a5_soc_info->fw_name, &pdev->dev);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
+			return rc;
+		}
+	} else {
+		rc = request_firmware(&core_info->fw_elf,
+				"CAMERA_ICP.elf", &pdev->dev);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
+			return rc;
+		}
 	}
 
 	if (!core_info->fw_elf) {

+ 2 - 1
drivers/cam_icp/icp_hw/a5_hw/a5_soc.c

@@ -32,7 +32,6 @@ static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
 	}
 
 	a5_soc_info = soc_info->soc_private;
-	fw_name = a5_soc_info->fw_name;
 
 	rc = of_property_read_string(of_node, "fw_name", &fw_name);
 	if (rc < 0) {
@@ -40,6 +39,8 @@ static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
 		goto end;
 	}
 
+	a5_soc_info->fw_name = fw_name;
+
 	rc = of_property_read_u32(of_node, "qos-val",
 		&a5_soc_info->a5_qos_val);
 	if (rc < 0) {

+ 1 - 1
drivers/cam_icp/icp_hw/a5_hw/a5_soc.h

@@ -18,7 +18,7 @@ struct a5_ubwc_cfg_ext {
 };
 
 struct a5_soc_info {
-	char *fw_name;
+	const char *fw_name;
 	bool ubwc_config_ext;
 	uint32_t a5_qos_val;
 	union {

+ 32 - 13
drivers/cam_isp/cam_isp_context.c

@@ -839,6 +839,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
 				buf_done_req_id, ctx_isp->active_req_cnt,
 				ctx->ctx_id);
+			ctx_isp->last_bufdone_err_apply_req_id = 0;
 		} else {
 			list_add(&req->list, &ctx->pending_req_list);
 			CAM_DBG(CAM_REQ,
@@ -864,6 +865,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
 			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
 		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
+		ctx_isp->last_bufdone_err_apply_req_id = 0;
 	}
 
 	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);
@@ -1167,11 +1169,11 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			 */
 			req_isp->deferred_fence_map_index[deferred_indx] = j;
 			req_isp->num_deferred_acks++;
-			CAM_WARN(CAM_ISP,
+			CAM_DBG(CAM_ISP,
 				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
 				ctx->ctx_id, req->request_id, bubble_state,
 				req_isp->bubble_report);
-			CAM_WARN(CAM_ISP,
+			CAM_DBG(CAM_ISP,
 				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
 				ctx->ctx_id, req_isp->num_deferred_acks, j,
 				req_isp->fence_map_out[j].resource_handle,
@@ -1264,7 +1266,6 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
 			req->request_id, req_isp->num_acked,
 			req_isp->num_fence_map_out, ctx->ctx_id);
-		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
 	}
 
 	if (req_isp->num_acked != req_isp->num_fence_map_out)
@@ -1383,20 +1384,28 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
 	struct cam_ctx_request *req;
 	struct cam_ctx_request *next_req = NULL;
 	struct cam_context *ctx = ctx_isp->base;
+	bool  req_in_wait_list = false;
 
 	if (list_empty(&ctx->active_req_list)) {
-		CAM_WARN(CAM_ISP,
-			"Buf done with no active request bubble_state=%d",
-			bubble_state);
 
 		if (!list_empty(&ctx->wait_req_list)) {
 			struct cam_isp_ctx_req *req_isp;
 
 			req = list_first_entry(&ctx->wait_req_list,
 				struct cam_ctx_request, list);
-			CAM_WARN(CAM_ISP,
-				"Buf done with no active request but with req in wait list, req %llu",
-				req->request_id);
+
+			req_in_wait_list = true;
+			if (ctx_isp->last_applied_req_id !=
+				ctx_isp->last_bufdone_err_apply_req_id) {
+				CAM_WARN(CAM_ISP,
+					"Buf done with no active request but with req in wait list, req %llu last apply id:%lld last err id:%lld",
+					req->request_id,
+					ctx_isp->last_applied_req_id,
+					ctx_isp->last_bufdone_err_apply_req_id);
+				ctx_isp->last_bufdone_err_apply_req_id =
+					ctx_isp->last_applied_req_id;
+			}
+
 			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 
 			/*
@@ -1410,6 +1419,15 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
 			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 				ctx_isp, req, done, bubble_state, true, true);
 		}
+
+		if (!req_in_wait_list  && (ctx_isp->last_applied_req_id !=
+			ctx_isp->last_bufdone_err_apply_req_id)) {
+			CAM_WARN(CAM_ISP,
+				"Buf done with no active request bubble_state=%d last_applied_req_id:%lld ",
+				bubble_state, ctx_isp->last_applied_req_id);
+			ctx_isp->last_bufdone_err_apply_req_id =
+					ctx_isp->last_applied_req_id;
+		}
 		return 0;
 	}
 
@@ -5362,6 +5380,11 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 		(struct cam_isp_context *) ctx->ctx_priv;
 	struct cam_isp_stop_args         stop_isp;
 
+	/* Mask off all the incoming hardware events */
+	spin_lock_bh(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock_bh(&ctx->lock);
+
 	/* stop hw first */
 	if (ctx_isp->hw_ctx) {
 		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -5374,10 +5397,6 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 			&stop);
 	}
 
-	/* Mask off all the incoming hardware events */
-	spin_lock_bh(&ctx->lock);
-	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
-	spin_unlock_bh(&ctx->lock);
 	CAM_DBG(CAM_ISP, "next Substate[%s]",
 		__cam_isp_ctx_substate_val_to_type(
 		ctx_isp->substate_activated));

+ 2 - 0
drivers/cam_isp/cam_isp_context.h

@@ -277,6 +277,7 @@ struct cam_isp_context_event_record {
  *                             decide whether to apply request in offline ctx
  * @workq:                     Worker thread for offline ife
  * @trigger_id:                ID provided by CRM for each ctx on the link
+ * @last_bufdone_err_apply_req_id:  last bufdone error apply request id
  *
  */
 struct cam_isp_context {
@@ -323,6 +324,7 @@ struct cam_isp_context {
 	atomic_t                              rxd_epoch;
 	struct cam_req_mgr_core_workq        *workq;
 	int32_t                               trigger_id;
+	int64_t                               last_bufdone_err_apply_req_id;
 };
 
 /**

+ 45 - 20
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -742,6 +742,12 @@ static void cam_ife_hw_mgr_dump_all_ctx(void)
 	struct cam_ife_hw_mgr_ctx       *ctx_temp;
 
 	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (list_empty(&g_ife_hw_mgr.used_ctx_list)) {
+		CAM_INFO(CAM_ISP, "Currently no ctx in use");
+		mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+		return;
+	}
+
 	list_for_each_entry_safe(ctx, ctx_temp,
 		&g_ife_hw_mgr.used_ctx_list, list) {
 		CAM_INFO_RATE_LIMIT(CAM_ISP,
@@ -799,17 +805,15 @@ static void cam_ife_hw_mgr_print_acquire_info(
 	int hw_idx[CAM_ISP_HW_SPLIT_MAX] = {-1, -1};
 	int i = 0;
 
-	hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_ife_csid,
-		struct cam_isp_hw_mgr_res, list);
-
-	if (hw_mgr_ctx->is_offline)
+	if (!list_empty(&hw_mgr_ctx->res_list_ife_src)) {
 		hw_mgr_res = list_first_entry(&hw_mgr_ctx->res_list_ife_src,
 			struct cam_isp_hw_mgr_res, list);
 
-	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
-		hw_res = hw_mgr_res->hw_res[i];
-		if (hw_res && hw_res->hw_intf)
-			hw_idx[i] = hw_res->hw_intf->hw_idx;
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			hw_res = hw_mgr_res->hw_res[i];
+			if (hw_res && hw_res->hw_intf)
+				hw_idx[i] = hw_res->hw_intf->hw_idx;
+		}
 	}
 
 	if (acquire_failed)
@@ -824,12 +828,12 @@ static void cam_ife_hw_mgr_print_acquire_info(
 	return;
 
 fail:
-	CAM_ERR(CAM_ISP, "Acquire HW failed for ctx:%u", hw_mgr_ctx->ctx_index);
-	CAM_INFO(CAM_ISP,
-		"Acquire fail for ctx:%u %s IFE[%d %d] with [%u pix] [%u pd] [%u rdi] ports",
-		hw_mgr_ctx->ctx_index, (hw_mgr_ctx->is_dual) ? "dual" : "single",
-		hw_idx[CAM_ISP_HW_SPLIT_LEFT], hw_idx[CAM_ISP_HW_SPLIT_RIGHT],
-		num_pix_port, num_pd_port, num_rdi_port);
+	CAM_ERR(CAM_ISP,
+		"Failed to acquire %s-IFE with [%u pix] [%u pd] [%u rdi] ports for ctx:%u",
+		(hw_mgr_ctx->is_dual) ? "dual" : "single",
+		num_pix_port, num_pd_port, num_rdi_port, hw_mgr_ctx->ctx_index);
+	CAM_INFO(CAM_ISP, "Previously acquired IFEs[%d %d]",
+		hw_idx[CAM_ISP_HW_SPLIT_LEFT], hw_idx[CAM_ISP_HW_SPLIT_RIGHT]);
 
 	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
 		&hw_mgr_ctx->res_list_ife_src, list) {
@@ -1475,6 +1479,7 @@ static int cam_convert_hw_idx_to_ife_hw_num(int hw_idx)
 			break;
 		case CAM_CPAS_TITAN_580_V100:
 		case CAM_CPAS_TITAN_570_V200:
+		case CAM_CPAS_TITAN_165_V100:
 			if (hw_idx == 0)
 				return CAM_ISP_IFE0_HW;
 			else if (hw_idx == 1)
@@ -1706,6 +1711,7 @@ static int cam_ife_hw_mgr_acquire_csid_hw(
 		return -EINVAL;
 	}
 
+	CAM_DBG(CAM_ISP, "Acquire CSID HW lower_idx: %d", is_start_lower_idx);
 	if (is_start_lower_idx) {
 		for (i =  0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
 			if (!ife_hw_mgr->csid_devices[i])
@@ -1767,6 +1773,7 @@ static int cam_ife_mgr_acquire_cid_res(
 	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
 	csid_acquire.in_port = in_port;
 	csid_acquire.res_id =  path_res_id;
+	csid_acquire.node_res = NULL;
 
 	if (ife_ctx->is_rdi_only_context)
 		csid_acquire.can_use_lite = true;
@@ -1840,8 +1847,12 @@ static int cam_ife_mgr_acquire_cid_res(
 		}
 	}
 
-	/* Acquire right if not already acquired */
-	/* For dual IFE cases, master will be lower idx */
+	/* Acquire Left if not already acquired */
+	/* For dual IFE cases, start acquiring the lower idx first */
+	CAM_DBG(CAM_ISP, "acquire new rsrc fe: %d usage_type %d dsp %d rsrc %p",
+		ife_ctx->is_fe_enabled, in_port->usage_type,
+		ife_ctx->dsp_enabled, csid_acquire.node_res);
+
 	if (ife_ctx->is_fe_enabled ||
 		ife_ctx->dsp_enabled)
 		rc = cam_ife_hw_mgr_acquire_csid_hw(ife_hw_mgr,
@@ -1851,7 +1862,8 @@ static int cam_ife_mgr_acquire_cid_res(
 			&csid_acquire, false);
 
 	if (rc || !csid_acquire.node_res) {
-		CAM_ERR(CAM_ISP, "No %d paths available", path_res_id);
+		CAM_ERR(CAM_ISP, "No %d paths available rc %d rsrc %p",
+			path_res_id, rc, csid_acquire.node_res);
 		goto put_res;
 	}
 
@@ -2718,6 +2730,7 @@ static int cam_ife_mgr_acquire_hw_for_ctx(
 
 	is_dual_isp = in_port->usage_type;
 	ife_ctx->dsp_enabled = (bool)in_port->dsp_mode;
+	ife_ctx->is_dual = (bool)in_port->usage_type;
 
 	/* get root node resource */
 	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
@@ -3132,6 +3145,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 			CAM_ERR(CAM_ISP, "Failed in parsing: %d", rc);
 			goto free_mem;
 		}
+		CAM_DBG(CAM_ISP, "in_res_type %x", in_port->res_type);
 
 		cam_ife_hw_mgr_preprocess_port(ife_ctx, &in_port[i]);
 		total_pix_port += in_port[i].ipp_count +
@@ -3149,7 +3163,6 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 
 	/* acquire HW resources */
 	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
-
 		CAM_DBG(CAM_ISP, "in_res_type %x", in_port[i].res_type);
 
 		if ((in_port[i].cust_node) && (!ife_ctx->custom_enabled)) {
@@ -3185,8 +3198,11 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 
 		if (rc) {
 			cam_ife_hw_mgr_print_acquire_info(ife_ctx,
-				total_pix_port, total_pd_port,
-				total_rdi_port, rc);
+				(in_port[i].ipp_count +
+				in_port[i].ife_rd_count +
+				in_port[i].lcr_count),
+				in_port[i].ppp_count,
+				in_port[i].rdi_count, rc);
 			goto free_res;
 		}
 
@@ -6485,6 +6501,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 			return rc;
 
 		frame_header_enable = true;
+		prepare_hw_data->frame_header_res_id = 0x0;
 	}
 
 	if (ctx->internal_cdm)
@@ -6590,6 +6607,14 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 		}
 	}
 
+	/* Check if frame header was enabled for any WM */
+	if ((ctx->custom_config & CAM_IFE_CUSTOM_CFG_FRAME_HEADER_TS) &&
+		(prepare->num_out_map_entries) &&
+		(!prepare_hw_data->frame_header_res_id)) {
+		CAM_ERR(CAM_ISP, "Failed to configure frame header");
+		goto end;
+	}
+
 	/*
 	 * reg update will be done later for the initial configure.
 	 * need to plus one to the op_code and only take the lower

+ 11 - 6
drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c

@@ -700,6 +700,7 @@ int cam_isp_add_io_buffers(
 			wm_update.num_buf   = plane_id;
 			wm_update.io_cfg    = &io_cfg[i];
 			wm_update.frame_header = 0;
+			wm_update.fh_enabled = false;
 
 			for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
 				plane_id++)
@@ -709,14 +710,8 @@ int cam_isp_add_io_buffers(
 			if ((frame_header_info->frame_header_enable) &&
 				!(frame_header_info->frame_header_res_id)) {
 				wm_update.frame_header = iova_addr;
-				frame_header_info->frame_header_res_id =
-					res->res_id;
 				wm_update.local_id =
 					prepare->packet->header.request_id;
-				CAM_DBG(CAM_ISP,
-					"Frame header enabled for res: 0x%x iova: %pK",
-					frame_header_info->frame_header_res_id,
-					wm_update.frame_header);
 			}
 
 			update_buf.cmd.size = kmd_buf_remain_size;
@@ -736,6 +731,16 @@ int cam_isp_add_io_buffers(
 				rc = -ENOMEM;
 				return rc;
 			}
+
+			if (wm_update.fh_enabled) {
+				frame_header_info->frame_header_res_id =
+					res->res_id;
+				CAM_DBG(CAM_ISP,
+					"Frame header enabled for res: 0x%x iova: %pK",
+					frame_header_info->frame_header_res_id,
+					wm_update.frame_header);
+			}
+
 			io_cfg_used_bytes += update_buf.cmd.used_bytes;
 
 			if (!out_map_entries) {

+ 37 - 18
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c

@@ -692,19 +692,20 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 	struct cam_csid_soc_private *soc_priv;
 
 	CAM_DBG(CAM_ISP,
-		"CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
+		"CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d cust_node:%u",
 		csid_hw->hw_intf->hw_idx,
 		cid_reserv->in_port->res_type,
 		cid_reserv->in_port->lane_type,
 		cid_reserv->in_port->lane_num,
 		cid_reserv->in_port->dt[0],
-		cid_reserv->in_port->vc[0]);
+		cid_reserv->in_port->vc[0],
+		cid_reserv->in_port->cust_node);
 
 	soc_priv = (struct cam_csid_soc_private *)
 		(csid_hw->hw_info->soc_info.soc_private);
 
 	if (soc_priv->is_ife_csid_lite && !cid_reserv->can_use_lite) {
-		CAM_INFO(CAM_ISP, "CSID[%u] not lite context",
+		CAM_DBG(CAM_ISP, "CSID[%u] not lite context",
 			csid_hw->hw_intf->hw_idx);
 		return -EINVAL;
 	}
@@ -821,8 +822,10 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 			if (cid_reserv->in_port->cust_node ==
 				CAM_ISP_ACQ_CUSTOM_PRIMARY) {
 				if (csid_hw->hw_intf->hw_idx != 0) {
-					CAM_ERR(CAM_ISP, "CSID%d not eligible",
-						csid_hw->hw_intf->hw_idx);
+					CAM_ERR(CAM_ISP,
+						"CSID%d not eligible for cust_node: %u",
+						csid_hw->hw_intf->hw_idx,
+						cid_reserv->in_port->cust_node);
 					rc = -EINVAL;
 					goto end;
 				}
@@ -831,8 +834,10 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 			if (cid_reserv->in_port->cust_node ==
 				CAM_ISP_ACQ_CUSTOM_SECONDARY) {
 				if (csid_hw->hw_intf->hw_idx != 1) {
-					CAM_ERR(CAM_ISP, "CSID%d not eligible",
-						csid_hw->hw_intf->hw_idx);
+					CAM_ERR(CAM_ISP,
+						"CSID%d not eligible for cust_node: %u",
+						csid_hw->hw_intf->hw_idx,
+						cid_reserv->in_port->cust_node);
 					rc = -EINVAL;
 					goto end;
 				}
@@ -859,7 +864,6 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 			cid_reserv->in_port->lane_type ||
 			csid_hw->csi2_rx_cfg.lane_num !=
 			cid_reserv->in_port->lane_num)) {
-
 			rc = -EINVAL;
 			goto end;
 		}
@@ -873,7 +877,6 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 			cid_reserv->in_port->height     ||
 			csid_hw->tpg_cfg.test_pattern !=
 			cid_reserv->in_port->test_pattern)) {
-
 			rc = -EINVAL;
 			goto end;
 		}
@@ -953,6 +956,7 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 	cid_data = (struct cam_ife_csid_cid_data *)
 		cid_reserv->node_res->res_priv;
 
+	CAM_DBG(CAM_ISP, "Obtained cid:%d", cid_reserv->node_res->res_id);
 	if (!csid_hw->csi2_reserve_cnt) {
 		csid_hw->res_type = cid_reserv->in_port->res_type;
 
@@ -1018,9 +1022,10 @@ int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 	}
 
 	csid_hw->csi2_reserve_cnt++;
-	CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
+	CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired phy_sel %u",
 		csid_hw->hw_intf->hw_idx,
-		cid_reserv->node_res->res_id);
+		cid_reserv->node_res->res_id,
+		csid_hw->csi2_rx_cfg.phy_sel);
 
 end:
 	return rc;
@@ -2554,7 +2559,8 @@ static int cam_ife_csid_init_config_rdi_path(
 	if (camera_hw_version == CAM_CPAS_TITAN_480_V100 ||
 		camera_hw_version == CAM_CPAS_TITAN_175_V130 ||
 		camera_hw_version == CAM_CPAS_TITAN_580_V100 ||
-		camera_hw_version == CAM_CPAS_TITAN_570_V200) {
+		camera_hw_version == CAM_CPAS_TITAN_570_V200 ||
+		camera_hw_version == CAM_CPAS_TITAN_165_V100) {
 		val |= (path_data->drop_enable <<
 			csid_reg->cmn_reg->drop_h_en_shift_val) |
 			(path_data->drop_enable <<
@@ -3681,9 +3687,10 @@ int cam_ife_csid_release(void *hw_priv,
 			memset(&csid_hw->csi2_rx_cfg, 0,
 				sizeof(struct cam_ife_csid_csi2_rx_cfg));
 
-		CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+		CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d res_state:%d",
 			 csid_hw->hw_intf->hw_idx,
-			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt,
+			res->res_state);
 
 		break;
 	case CAM_ISP_RESOURCE_PIX_PATH:
@@ -5202,6 +5209,9 @@ handle_fatal_error:
 			CSID_PATH_ERROR_LINE_COUNT)) {
 			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 			csid_reg->ipp_reg->csid_pxl_format_measure0_addr);
+			val2 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_format_measure_cfg1_addr
+			);
 
 			CAM_ERR(CAM_ISP,
 				"CSID:%d irq_status_ipp:0x%x",
@@ -5209,8 +5219,11 @@ handle_fatal_error:
 				irq_status[CAM_IFE_CSID_IRQ_REG_IPP]);
 			CAM_ERR(CAM_ISP,
 			"Expected:: h: 0x%x w: 0x%x actual:: h: 0x%x w: 0x%x [format_measure0: 0x%x]",
-			csid_hw->ipp_path_config.height,
-			csid_hw->ipp_path_config.width,
+			((val2 >>
+			csid_reg->cmn_reg->format_measure_height_shift_val) &
+			csid_reg->cmn_reg->format_measure_height_mask_val),
+			val2 &
+			csid_reg->cmn_reg->format_measure_width_mask_val,
 			((val >>
 			csid_reg->cmn_reg->format_measure_height_shift_val) &
 			csid_reg->cmn_reg->format_measure_height_mask_val),
@@ -5279,6 +5292,9 @@ handle_fatal_error:
 			CSID_PATH_ERROR_LINE_COUNT)) {
 			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 			csid_reg->ppp_reg->csid_pxl_format_measure0_addr);
+			val2 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_format_measure_cfg1_addr
+			);
 
 			CAM_ERR(CAM_ISP,
 				"CSID:%d irq_status_ppp:0x%x",
@@ -5286,8 +5302,11 @@ handle_fatal_error:
 				irq_status[CAM_IFE_CSID_IRQ_REG_PPP]);
 			CAM_ERR(CAM_ISP,
 			"Expected:: h:  0x%x w: 0x%x actual:: h: 0x%x w: 0x%x [format_measure0: 0x%x]",
-			csid_hw->ppp_path_config.height,
-			csid_hw->ppp_path_config.width,
+			((val2 >>
+			csid_reg->cmn_reg->format_measure_height_shift_val) &
+			csid_reg->cmn_reg->format_measure_height_mask_val),
+			val2 &
+			csid_reg->cmn_reg->format_measure_width_mask_val,
 			((val >>
 			csid_reg->cmn_reg->format_measure_height_shift_val) &
 			csid_reg->cmn_reg->format_measure_height_mask_val),

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h

@@ -249,6 +249,7 @@ struct cam_isp_hw_cmd_buf_update {
  * @ image_buf_offset: image buffer address offset array
  * @ num_buf:          Number of buffers in the image_buf array
  * @ frame_header:     frame header iova
+ * @ fh_enabled:       flag to indicate if this WM enables frame header
  * @ local_id:         local id for the wm
  * @ width:            width of scratch buffer
  * @ height:           height of scratch buffer
@@ -262,6 +263,7 @@ struct cam_isp_hw_get_wm_update {
 	uint32_t                        image_buf_offset[CAM_PACKET_MAX_PLANES];
 	uint32_t                        num_buf;
 	uint64_t                        frame_header;
+	bool                            fh_enabled;
 	uint32_t                        local_id;
 	uint32_t                        width;
 	uint32_t                        height;

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c

@@ -62,6 +62,7 @@ static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
 	case CAM_CPAS_TITAN_480_V100:
 	case CAM_CPAS_TITAN_580_V100:
 	case CAM_CPAS_TITAN_570_V200:
+	case CAM_CPAS_TITAN_165_V100:
 		num_ubwc_cfg = of_property_count_u32_elems(of_node,
 			"ubwc-static-cfg");
 

+ 0 - 15
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h

@@ -481,9 +481,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
 			.image_cfg_1              = 0x0000AE10,
 			.image_cfg_2              = 0x0000AE14,
 			.packer_cfg               = 0x0000AE18,
-			.frame_header_addr        = 0x0000AE20,
-			.frame_header_incr        = 0x0000AE24,
-			.frame_header_cfg         = 0x0000AE28,
 			.line_done_cfg            = 0,
 			.irq_subsample_period     = 0x0000AE30,
 			.irq_subsample_pattern    = 0x0000AE34,
@@ -510,9 +507,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
 			.image_cfg_1              = 0x0000AF10,
 			.image_cfg_2              = 0x0000AF14,
 			.packer_cfg               = 0x0000AF18,
-			.frame_header_addr        = 0x0000AF20,
-			.frame_header_incr        = 0x0000AF24,
-			.frame_header_cfg         = 0x0000AF28,
 			.line_done_cfg            = 0,
 			.irq_subsample_period     = 0x0000AF30,
 			.irq_subsample_pattern    = 0x0000AF34,
@@ -597,9 +591,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
 			.image_cfg_1              = 0x0000B210,
 			.image_cfg_2              = 0x0000B214,
 			.packer_cfg               = 0x0000B218,
-			.frame_header_addr        = 0x0000B220,
-			.frame_header_incr        = 0x0000B224,
-			.frame_header_cfg         = 0x0000B228,
 			.line_done_cfg            = 0,
 			.irq_subsample_period     = 0x0000B230,
 			.irq_subsample_pattern    = 0x0000B234,
@@ -626,9 +617,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
 			.image_cfg_1              = 0x0000B310,
 			.image_cfg_2              = 0x0000B314,
 			.packer_cfg               = 0x0000B318,
-			.frame_header_addr        = 0x0000B320,
-			.frame_header_incr        = 0x0000B324,
-			.frame_header_cfg         = 0x0000B328,
 			.line_done_cfg            = 0,
 			.irq_subsample_period     = 0x0000B330,
 			.irq_subsample_pattern    = 0x0000B334,
@@ -684,9 +672,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
 			.image_cfg_1              = 0x0000B510,
 			.image_cfg_2              = 0x0000B514,
 			.packer_cfg               = 0x0000B518,
-			.frame_header_addr        = 0x0000B520,
-			.frame_header_incr        = 0x0000B524,
-			.frame_header_cfg         = 0x0000B528,
 			.line_done_cfg            = 0,
 			.irq_subsample_period     = 0x0000B530,
 			.irq_subsample_pattern    = 0x0000B534,

+ 0 - 15
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe680.h

@@ -280,9 +280,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = {
 			.image_cfg_1              = 0x00001010,
 			.image_cfg_2              = 0x00001014,
 			.packer_cfg               = 0x00001018,
-			.frame_header_addr        = 0x00001020,
-			.frame_header_incr        = 0x00001024,
-			.frame_header_cfg         = 0x00001028,
 			.irq_subsample_period     = 0x00001030,
 			.irq_subsample_pattern    = 0x00001034,
 			.framedrop_period         = 0x00001038,
@@ -309,9 +306,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = {
 			.image_cfg_1              = 0x00001110,
 			.image_cfg_2              = 0x00001114,
 			.packer_cfg               = 0x00001118,
-			.frame_header_addr        = 0x00001120,
-			.frame_header_incr        = 0x00001124,
-			.frame_header_cfg         = 0x00001128,
 			.irq_subsample_period     = 0x00001130,
 			.irq_subsample_pattern    = 0x00001134,
 			.framedrop_period         = 0x00001138,
@@ -396,9 +390,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = {
 			.image_cfg_1              = 0x00001410,
 			.image_cfg_2              = 0x00001414,
 			.packer_cfg               = 0x00001418,
-			.frame_header_addr        = 0x00001420,
-			.frame_header_incr        = 0x00001424,
-			.frame_header_cfg         = 0x00001428,
 			.irq_subsample_period     = 0x00001430,
 			.irq_subsample_pattern    = 0x00001434,
 			.framedrop_period         = 0x00001438,
@@ -425,9 +416,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = {
 			.image_cfg_1              = 0x00001510,
 			.image_cfg_2              = 0x00001514,
 			.packer_cfg               = 0x00001518,
-			.frame_header_addr        = 0x00001520,
-			.frame_header_incr        = 0x00001524,
-			.frame_header_cfg         = 0x00001528,
 			.irq_subsample_period     = 0x00001530,
 			.irq_subsample_pattern    = 0x00001534,
 			.framedrop_period         = 0x00001538,
@@ -483,9 +471,6 @@ static struct cam_vfe_bus_ver3_hw_info vfe680_bus_hw_info = {
 			.image_cfg_1              = 0x00001710,
 			.image_cfg_2              = 0x00001714,
 			.packer_cfg               = 0x00001718,
-			.frame_header_addr        = 0x00001720,
-			.frame_header_incr        = 0x00001724,
-			.frame_header_cfg         = 0x00001728,
 			.irq_subsample_period     = 0x00001730,
 			.irq_subsample_pattern    = 0x00001734,
 			.framedrop_period         = 0x00001738,

+ 4 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c

@@ -1269,7 +1269,8 @@ static int cam_vfe_bus_start_wm(
 		} else if ((camera_hw_version == CAM_CPAS_TITAN_175_V100) ||
 			(camera_hw_version == CAM_CPAS_TITAN_175_V101) ||
 			(camera_hw_version == CAM_CPAS_TITAN_175_V120) ||
-			(camera_hw_version == CAM_CPAS_TITAN_175_V130)) {
+			(camera_hw_version == CAM_CPAS_TITAN_175_V130) ||
+			(camera_hw_version == CAM_CPAS_TITAN_165_V100)) {
 			struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
 				*ubwc_regs;
 
@@ -2620,6 +2621,7 @@ static void cam_vfe_bus_update_ubwc_meta_addr(
 	case CAM_CPAS_TITAN_175_V101:
 	case CAM_CPAS_TITAN_175_V120:
 	case CAM_CPAS_TITAN_175_V130:
+	case CAM_CPAS_TITAN_165_V100:
 		ubwc_3_regs =
 			(struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
 			regs;
@@ -2835,6 +2837,7 @@ static int cam_vfe_bus_update_ubwc_regs(
 	case CAM_CPAS_TITAN_175_V101:
 	case CAM_CPAS_TITAN_175_V120:
 	case CAM_CPAS_TITAN_175_V130:
+	case CAM_CPAS_TITAN_165_V100:
 		rc = cam_vfe_bus_update_ubwc_3_regs(
 			wm_data, reg_val_pair, i, j);
 		break;

+ 144 - 64
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c

@@ -199,6 +199,7 @@ struct cam_vfe_bus_ver3_priv {
 	struct cam_isp_resource_node       *bus_client;
 	struct cam_isp_resource_node       *comp_grp;
 	struct cam_isp_resource_node       *vfe_out;
+	uint32_t  vfe_out_map_outtype[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
 
 	struct list_head                    free_comp_grp;
 	struct list_head                    used_comp_grp;
@@ -566,74 +567,114 @@ static bool cam_vfe_bus_ver3_can_be_secure(uint32_t out_type)
 }
 
 static enum cam_vfe_bus_ver3_vfe_out_type
-	cam_vfe_bus_ver3_get_out_res_id(uint32_t res_type)
+	cam_vfe_bus_ver3_get_out_res_id_and_index(
+	struct cam_vfe_bus_ver3_priv  *bus_priv,
+	uint32_t res_type, uint32_t  *index)
 {
+	uint32_t  vfe_out_type;
+
 	switch (res_type) {
 	case CAM_ISP_IFE_OUT_RES_FULL:
-		return CAM_VFE_BUS_VER3_VFE_OUT_FULL;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL;
+		break;
 	case CAM_ISP_IFE_OUT_RES_DS4:
-		return CAM_VFE_BUS_VER3_VFE_OUT_DS4;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4;
+		break;
 	case CAM_ISP_IFE_OUT_RES_DS16:
-		return CAM_VFE_BUS_VER3_VFE_OUT_DS16;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16;
+		break;
 	case CAM_ISP_IFE_OUT_RES_FD:
-		return CAM_VFE_BUS_VER3_VFE_OUT_FD;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FD;
+		break;
 	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
-		return CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP;
+		break;
 	case CAM_ISP_IFE_OUT_RES_2PD:
-		return CAM_VFE_BUS_VER3_VFE_OUT_2PD;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD;
+		break;
 	case CAM_ISP_IFE_OUT_RES_RDI_0:
-		return CAM_VFE_BUS_VER3_VFE_OUT_RDI0;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI0;
+		break;
 	case CAM_ISP_IFE_OUT_RES_RDI_1:
-		return CAM_VFE_BUS_VER3_VFE_OUT_RDI1;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI1;
+		break;
 	case CAM_ISP_IFE_OUT_RES_RDI_2:
-		return CAM_VFE_BUS_VER3_VFE_OUT_RDI2;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI2;
+		break;
 	case CAM_ISP_IFE_OUT_RES_RDI_3:
-		return CAM_VFE_BUS_VER3_VFE_OUT_RDI3;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_RDI3;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_BF:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_RS:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_CS:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST;
+		break;
 	case CAM_ISP_IFE_OUT_RES_FULL_DISP:
-		return CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP;
+		break;
 	case CAM_ISP_IFE_OUT_RES_DS4_DISP:
-		return CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP;
+		break;
 	case CAM_ISP_IFE_OUT_RES_DS16_DISP:
-		return CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP;
+		break;
 	case CAM_ISP_IFE_OUT_RES_LCR:
-		return CAM_VFE_BUS_VER3_VFE_OUT_LCR;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LCR;
+		break;
 	case CAM_ISP_IFE_OUT_RES_AWB_BFW:
-		return CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_AWB_BFW;
+		break;
 	case CAM_ISP_IFE_OUT_RES_2PD_STATS:
-		return CAM_VFE_BUS_VER3_VFE_OUT_2PD_STATS;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_2PD_STATS;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AEC_BE;
+		break;
+
 	case CAM_ISP_IFE_OUT_RES_LTM_STATS:
-		return CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_LTM_STATS;
+		break;
 	case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_GTM_BHIST;
+		break;
 	case CAM_ISP_IFE_LITE_OUT_RES_STATS_BE:
-		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_BE;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BE;
+		break;
 	case CAM_ISP_IFE_LITE_OUT_RES_GAMMA:
-		return CAM_VFE_BUS_VER3_VFE_OUT_GAMMA;
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_GAMMA;
+		break;
 	default:
 		CAM_WARN(CAM_ISP, "Invalid isp res id: %d , assigning max",
 			res_type);
+		vfe_out_type = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+		*index = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 		return CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 	}
+	*index = bus_priv->vfe_out_map_outtype[vfe_out_type];
+
+	return vfe_out_type;
 }
 
 static int cam_vfe_bus_ver3_get_comp_vfe_out_res_id_list(
@@ -862,6 +903,11 @@ static void cam_vfe_bus_ver3_get_constraint_errors(
 
 	for (i = 0; i < bus_priv->num_out; i++) {
 		out_rsrc_node = &bus_priv->vfe_out[i];
+		if (!out_rsrc_node || !out_rsrc_node->res_priv) {
+			CAM_DBG(CAM_ISP,
+				"Vfe out:%d out rsrc node or data is NULL", i);
+			continue;
+		}
 		out_rsrc_data = out_rsrc_node->res_priv;
 		for (j = 0; j < out_rsrc_data->num_wm; j++) {
 			wm_data = out_rsrc_data->wm_res[j].res_priv;
@@ -1888,6 +1934,7 @@ static int cam_vfe_bus_ver3_acquire_vfe_out(void *bus_priv, void *acquire_args,
 	struct cam_vfe_bus_ver3_vfe_out_data   *rsrc_data = NULL;
 	uint32_t                                secure_caps = 0, mode;
 	struct cam_vfe_bus_ver3_comp_grp_acquire_args comp_acq_args = {0};
+	uint32_t       outmap_index = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 
 	if (!bus_priv || !acquire_args) {
 		CAM_ERR(CAM_ISP, "Invalid Param");
@@ -1901,12 +1948,20 @@ static int cam_vfe_bus_ver3_acquire_vfe_out(void *bus_priv, void *acquire_args,
 		ver3_bus_priv->common_data.core_index,
 		out_acquire_args->out_port_info->res_type);
 
-	vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id(
-		out_acquire_args->out_port_info->res_type);
-	if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX)
+	vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(
+				ver3_bus_priv,
+				out_acquire_args->out_port_info->res_type,
+				&outmap_index);
+	if ((vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX) ||
+		(outmap_index >= ver3_bus_priv->num_out)) {
+		CAM_WARN(CAM_ISP,
+			"target does not support req res id :0x%x outtype:%d index:%d",
+			out_acquire_args->out_port_info->res_type,
+			vfe_out_res_id, outmap_index);
 		return -ENODEV;
+	}
 
-	rsrc_node = &ver3_bus_priv->vfe_out[vfe_out_res_id];
+	rsrc_node = &ver3_bus_priv->vfe_out[outmap_index];
 	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
 		CAM_ERR(CAM_ISP,
 			"VFE:%d out_type:0x%X resource not available state:%d",
@@ -2307,15 +2362,20 @@ static uint32_t cam_vfe_bus_ver3_get_last_consumed_addr(
 	struct cam_vfe_bus_ver3_vfe_out_data     *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_wm_resource_data *wm_rsrc_data = NULL;
 	enum cam_vfe_bus_ver3_vfe_out_type        res_id;
+	uint32_t           outmap_index = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 
-	res_id = cam_vfe_bus_ver3_get_out_res_id(res_type);
+	res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(bus_priv,
+		res_type, &outmap_index);
 
-	if (res_id >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) {
-		CAM_ERR(CAM_ISP, "invalid res id:%u", res_id);
+	if ((res_id >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) ||
+		(outmap_index >= bus_priv->num_out)) {
+		CAM_WARN(CAM_ISP,
+			"target does not support req res id :0x%x outtype:%d index:%d",
+			res_type, res_id, outmap_index);
 		return 0;
 	}
 
-	rsrc_node = &bus_priv->vfe_out[res_id];
+	rsrc_node = &bus_priv->vfe_out[outmap_index];
 	rsrc_data = rsrc_node->res_priv;
 	wm_rsrc_data = rsrc_data->wm_res[PLANE_Y].res_priv;
 
@@ -2394,7 +2454,8 @@ static int cam_vfe_bus_ver3_init_vfe_out_resource(uint32_t  index,
 		return -EINVAL;
 	}
 
-	vfe_out = &ver3_bus_priv->vfe_out[vfe_out_type];
+	ver3_bus_priv->vfe_out_map_outtype[vfe_out_type] = index;
+	vfe_out = &ver3_bus_priv->vfe_out[index];
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
 		vfe_out->res_priv) {
 		CAM_ERR(CAM_ISP, "vfe_out_type %d has already been initialized",
@@ -2506,7 +2567,7 @@ static int cam_vfe_bus_ver3_deinit_vfe_out_resource(
 }
 
 static int cam_vfe_bus_ver3_print_dimensions(
-	enum cam_vfe_bus_ver3_vfe_out_type         vfe_out_res_id,
+	uint32_t                                   res_id,
 	struct cam_vfe_bus_ver3_priv              *bus_priv)
 {
 	struct cam_isp_resource_node              *rsrc_node = NULL;
@@ -2515,20 +2576,29 @@ static int cam_vfe_bus_ver3_print_dimensions(
 	struct cam_vfe_bus_ver3_common_data  *common_data = NULL;
 	int                                        i;
 	uint32_t addr_status0, addr_status1, addr_status2, addr_status3;
+	enum cam_vfe_bus_ver3_vfe_out_type  vfe_out_res_id =
+		CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+	uint32_t  outmap_index = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 
 	if (!bus_priv) {
 		CAM_ERR(CAM_ISP, "Invalid bus private data, res_id: %d",
-			vfe_out_res_id);
+			res_id);
 		return -EINVAL;
 	}
 
-	if (vfe_out_res_id >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) {
-		CAM_ERR(CAM_ISP, "Invalid out resource for dump: %d",
-			vfe_out_res_id);
+	vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id_and_index(bus_priv,
+				res_id, &outmap_index);
+
+	if ((vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX) ||
+		(outmap_index >= bus_priv->num_out)) {
+		CAM_WARN_RATE_LIMIT(CAM_ISP,
+			"target does not support req res id :0x%x outtype:%d index:%d",
+			res_id,
+			vfe_out_res_id, outmap_index);
 		return -EINVAL;
 	}
 
-	rsrc_node = &bus_priv->vfe_out[vfe_out_res_id];
+	rsrc_node = &bus_priv->vfe_out[outmap_index];
 	rsrc_data = rsrc_node->res_priv;
 	if (!rsrc_data) {
 		CAM_ERR(CAM_ISP, "VFE out data is null, res_id: %d",
@@ -2820,7 +2890,6 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 	uint32_t  i, j, k, size = 0;
 	uint32_t  frame_inc = 0, val;
 	uint32_t loop_size = 0;
-	bool frame_header_enable = false;
 
 	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
 	update_buf =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
@@ -2859,20 +2928,22 @@ static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
 			wm_data->en_cfg &= ~(1 << 2);
 
 		if (update_buf->wm_update->frame_header &&
-			!frame_header_enable) {
-			wm_data->en_cfg |= 1 << 2;
-			frame_header_enable = true;
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-					wm_data->hw_regs->frame_header_addr,
-					update_buf->wm_update->frame_header);
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-					wm_data->hw_regs->frame_header_cfg,
+			!update_buf->wm_update->fh_enabled) {
+			if (wm_data->hw_regs->frame_header_addr) {
+				wm_data->en_cfg |= 1 << 2;
+				update_buf->wm_update->fh_enabled = true;
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+						wm_data->hw_regs->frame_header_addr,
+						update_buf->wm_update->frame_header);
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+						wm_data->hw_regs->frame_header_cfg,
+						update_buf->wm_update->local_id);
+				CAM_DBG(CAM_ISP,
+					"WM: %d en_cfg 0x%x frame_header %pK local_id %u",
+					wm_data->index, wm_data->en_cfg,
+					update_buf->wm_update->frame_header,
 					update_buf->wm_update->local_id);
-			CAM_DBG(CAM_ISP,
-				"WM: %d en_cfg 0x%x frame_header %pK local_id %u",
-				wm_data->index, wm_data->en_cfg,
-				update_buf->wm_update->frame_header,
-				update_buf->wm_update->local_id);
+			}
 		}
 
 		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
@@ -3600,15 +3671,13 @@ static int cam_vfe_bus_ver3_process_cmd(
 		break;
 	case CAM_ISP_HW_CMD_DUMP_BUS_INFO: {
 		struct cam_isp_hw_event_info  *event_info;
-		enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id;
 
 		event_info =
 			(struct cam_isp_hw_event_info *)cmd_args;
 		bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
-		vfe_out_res_id =
-			cam_vfe_bus_ver3_get_out_res_id(event_info->res_id);
+
 		rc = cam_vfe_bus_ver3_print_dimensions(
-			vfe_out_res_id, bus_priv);
+			event_info->res_id, bus_priv);
 		break;
 		}
 	case CAM_ISP_HW_CMD_UBWC_UPDATE_V2:
@@ -3721,6 +3790,13 @@ int cam_vfe_bus_ver3_init(
 	bus_priv->common_data.comp_config_needed =
 		ver3_hw_info->comp_cfg_needed;
 
+	if (bus_priv->num_out >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "number of vfe out:%d more than max value:%d ",
+			bus_priv->num_out, CAM_VFE_BUS_VER3_VFE_OUT_MAX);
+		rc = -EINVAL;
+		goto free_bus_priv;
+	}
+
 	bus_priv->comp_grp = kzalloc((sizeof(struct cam_isp_resource_node) *
 		bus_priv->num_comp_grp), GFP_KERNEL);
 	if (!bus_priv->comp_grp) {
@@ -3775,6 +3851,10 @@ int cam_vfe_bus_ver3_init(
 		}
 	}
 
+	for (i = 0; i < CAM_VFE_BUS_VER3_VFE_OUT_MAX; i++)
+		bus_priv->vfe_out_map_outtype[i] =
+			CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+
 	for (i = 0; i < bus_priv->num_out; i++) {
 		rc = cam_vfe_bus_ver3_init_vfe_out_resource(i, bus_priv,
 			bus_hw_info);

+ 2 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c

@@ -427,7 +427,8 @@ static int cam_vfe_camif_lite_cpas_fifo_levels_reg_dump(
 	uint32_t  val;
 
 	if (soc_private->cpas_version == CAM_CPAS_TITAN_175_V120 ||
-		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130) {
+		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130 ||
+		soc_private->cpas_version == CAM_CPAS_TITAN_165_V100) {
 		rc = cam_cpas_reg_read(soc_private->cpas_handle,
 				CAM_CPAS_REG_CAMNOC, 0x3A20, true, &val);
 		if (rc) {

+ 2 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c

@@ -557,7 +557,8 @@ static int cam_vfe_camif_reg_dump(
 
 	soc_private = camif_priv->soc_info->soc_private;
 	if (soc_private->cpas_version == CAM_CPAS_TITAN_175_V120 ||
-		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130) {
+		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130 ||
+		soc_private->cpas_version == CAM_CPAS_TITAN_165_V100) {
 		cam_cpas_reg_read(soc_private->cpas_handle,
 			CAM_CPAS_REG_CAMNOC, 0x3A20, true, &val);
 		CAM_INFO(CAM_ISP, "IFE0_nRDI_MAXWR_LOW offset 0x3A20 val 0x%x",

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c

@@ -1487,6 +1487,7 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
 			ts.tv_sec, ts.tv_nsec/1000);
 
 		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
 
 		CAM_INFO(CAM_ISP, "ife_clk_src:%lld",
 			soc_private->ife_clk_src);
@@ -1519,6 +1520,7 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
 			ts.tv_sec, ts.tv_nsec/1000);
 
 		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
 
 		CAM_INFO(CAM_ISP, "ife_clk_src:%lld",
 			soc_private->ife_clk_src);

+ 2 - 1
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c

@@ -94,7 +94,8 @@ struct cam_vfe_mux_rdi_data *rdi_priv)
 	uint32_t  val;
 
 	if (soc_private->cpas_version == CAM_CPAS_TITAN_175_V120 ||
-		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130) {
+		soc_private->cpas_version == CAM_CPAS_TITAN_175_V130 ||
+		soc_private->cpas_version == CAM_CPAS_TITAN_165_V100) {
 		rc = cam_cpas_reg_read(soc_private->cpas_handle,
 				CAM_CPAS_REG_CAMNOC, 0x3A20, true, &val);
 		if (rc) {

+ 51 - 32
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c

@@ -81,6 +81,11 @@ end:
 	return rsc_idx;
 }
 
+static bool cam_ope_is_pending_request(struct cam_ope_ctx *ctx_data)
+{
+	return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX);
+}
+
 static int cam_ope_mgr_process_cmd(void *priv, void *data)
 {
 	int rc;
@@ -96,14 +101,16 @@ static int cam_ope_mgr_process_cmd(void *priv, void *data)
 
 	ctx_data = priv;
 	task_data = (struct ope_cmd_work_data *)data;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	cdm_cmd = task_data->data;
 
 	if (!cdm_cmd) {
 		CAM_ERR(CAM_OPE, "Invalid params%pK", cdm_cmd);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
 	}
 
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (ctx_data->ctx_state != OPE_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		CAM_ERR(CAM_OPE, "ctx id :%u is not in use",
@@ -119,6 +126,13 @@ static int cam_ope_mgr_process_cmd(void *priv, void *data)
 		return -EINVAL;
 	}
 
+	if (!cam_ope_is_pending_request(ctx_data)) {
+		CAM_WARN(CAM_OPE, "no pending req, req %lld last flush %lld",
+			task_data->req_id, ctx_data->last_flush_req);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
 	CAM_DBG(CAM_OPE,
 		"cam_cdm_submit_bls: handle 0x%x, ctx_id %d req %d cookie %d",
 		ctx_data->ope_cdm.cdm_handle, ctx_data->ctx_id,
@@ -256,11 +270,6 @@ static int cam_ope_mgr_reapply_config(struct cam_ope_hw_mgr *hw_mgr,
 	return rc;
 }
 
-static bool cam_ope_is_pending_request(struct cam_ope_ctx *ctx_data)
-{
-	return !bitmap_empty(ctx_data->bitmap, CAM_CTX_REQ_MAX);
-}
-
 static int cam_get_valid_ctx_id(void)
 {
 	struct cam_ope_hw_mgr *hw_mgr = ope_hw_mgr;
@@ -686,9 +695,7 @@ static int32_t cam_ope_process_request_timer(void *priv, void *data)
 		}
 
 		CAM_ERR(CAM_OPE,
-			"pending requests means, issue is with HW for ctx %d",
-			ctx_data->ctx_id);
-		CAM_ERR(CAM_OPE, "ctx: %d, lrt: %llu, lct: %llu",
+			"pending req at HW, ctx %d lrt %llu lct %llu",
 			ctx_data->ctx_id, ctx_data->last_req_time,
 			ope_hw_mgr->last_callback_time);
 		hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
@@ -1757,7 +1764,7 @@ end:
 
 static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
 	struct cam_packet *packet,
-	struct cam_hw_prepare_update_args *prep_args,
+	struct cam_hw_prepare_update_args *prep_arg,
 	struct cam_ope_ctx *ctx_data, uint32_t req_idx)
 {
 
@@ -1768,8 +1775,8 @@ static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
 	struct cam_ope_request *ope_request;
 
 	ope_request = ctx_data->req_list[req_idx];
-	prep_args->num_out_map_entries = 0;
-	prep_args->num_in_map_entries = 0;
+	prep_arg->num_out_map_entries = 0;
+	prep_arg->num_in_map_entries = 0;
 
 	ope_request = ctx_data->req_list[req_idx];
 	CAM_DBG(CAM_OPE, "E: req_idx = %u %x", req_idx, packet);
@@ -1779,8 +1786,16 @@ static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
 			io_buf = ope_request->io_buf[i][l];
 			if (io_buf->direction == CAM_BUF_INPUT) {
 				if (io_buf->fence != -1) {
-					sync_in_obj[j++] = io_buf->fence;
-					prep_args->num_in_map_entries++;
+					if (j < CAM_MAX_IN_RES) {
+						sync_in_obj[j++] =
+							io_buf->fence;
+						prep_arg->num_in_map_entries++;
+					} else {
+						CAM_ERR(CAM_OPE,
+						"reached max in_res %d %d",
+						io_buf->resource_type,
+						ope_request->request_id);
+					}
 				} else {
 					CAM_ERR(CAM_OPE, "Invalid fence %d %d",
 						io_buf->resource_type,
@@ -1788,10 +1803,10 @@ static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
 				}
 			} else {
 				if (io_buf->fence != -1) {
-					prep_args->out_map_entries[k].sync_id =
+					prep_arg->out_map_entries[k].sync_id =
 						io_buf->fence;
 					k++;
-					prep_args->num_out_map_entries++;
+					prep_arg->num_out_map_entries++;
 				} else {
 					if (io_buf->resource_type
 						!= OPE_OUT_RES_STATS_LTM) {
@@ -1812,38 +1827,38 @@ static int cam_ope_mgr_process_io_cfg(struct cam_ope_hw_mgr *hw_mgr,
 		}
 	}
 
-	if (prep_args->num_in_map_entries > 1 &&
-		prep_args->num_in_map_entries <= CAM_MAX_IN_RES)
-		prep_args->num_in_map_entries =
+	if (prep_arg->num_in_map_entries > 1 &&
+		prep_arg->num_in_map_entries <= CAM_MAX_IN_RES)
+		prep_arg->num_in_map_entries =
 			cam_common_util_remove_duplicate_arr(
-			sync_in_obj, prep_args->num_in_map_entries);
+			sync_in_obj, prep_arg->num_in_map_entries);
 
-	if (prep_args->num_in_map_entries > 1 &&
-		prep_args->num_in_map_entries <= CAM_MAX_IN_RES) {
+	if (prep_arg->num_in_map_entries > 1 &&
+		prep_arg->num_in_map_entries <= CAM_MAX_IN_RES) {
 		rc = cam_sync_merge(&sync_in_obj[0],
-			prep_args->num_in_map_entries, &merged_sync_in_obj);
+			prep_arg->num_in_map_entries, &merged_sync_in_obj);
 		if (rc) {
-			prep_args->num_out_map_entries = 0;
-			prep_args->num_in_map_entries = 0;
+			prep_arg->num_out_map_entries = 0;
+			prep_arg->num_in_map_entries = 0;
 			return rc;
 		}
 
 		ope_request->in_resource = merged_sync_in_obj;
 
-		prep_args->in_map_entries[0].sync_id = merged_sync_in_obj;
-		prep_args->num_in_map_entries = 1;
+		prep_arg->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prep_arg->num_in_map_entries = 1;
 		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
 			ctx_data->ctx_id, packet->header.request_id,
 			merged_sync_in_obj);
-	} else if (prep_args->num_in_map_entries == 1) {
-		prep_args->in_map_entries[0].sync_id = sync_in_obj[0];
-		prep_args->num_in_map_entries = 1;
+	} else if (prep_arg->num_in_map_entries == 1) {
+		prep_arg->in_map_entries[0].sync_id = sync_in_obj[0];
+		prep_arg->num_in_map_entries = 1;
 		ope_request->in_resource = 0;
 		CAM_DBG(CAM_OPE, "fence = %d", sync_in_obj[0]);
 	} else {
 		CAM_DBG(CAM_OPE, "Invalid count of input fences, count: %d",
-			prep_args->num_in_map_entries);
-		prep_args->num_in_map_entries = 0;
+			prep_arg->num_in_map_entries);
+		prep_arg->num_in_map_entries = 0;
 		ope_request->in_resource = 0;
 		rc = -EINVAL;
 	}
@@ -2734,6 +2749,10 @@ static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
 	ctx->ctxt_event_cb = args->event_cb;
 	cam_ope_ctx_clk_info_init(ctx);
 	ctx->ctx_state = OPE_CTX_STATE_ACQUIRED;
+	kzfree(cdm_acquire);
+	cdm_acquire = NULL;
+	kzfree(bw_update);
+	bw_update = NULL;
 
 	mutex_unlock(&ctx->ctx_mutex);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);

+ 12 - 4
drivers/cam_ope/ope_hw_mgr/ope_hw/top/ope_top.c

@@ -31,14 +31,22 @@ static struct ope_top ope_top_info;
 
 static int cam_ope_top_dump_debug_reg(struct ope_hw *ope_hw_info)
 {
-	uint32_t i, val;
+	uint32_t i, val[3];
 	struct cam_ope_top_reg *top_reg;
 
 	top_reg = ope_hw_info->top_reg;
-	for (i = 0; i < top_reg->num_debug_registers; i++) {
-		val = cam_io_r_mb(top_reg->base +
+	for (i = 0; i < top_reg->num_debug_registers; i = i+3) {
+		val[0] = cam_io_r_mb(top_reg->base +
 			top_reg->debug_regs[i].offset);
-		CAM_INFO(CAM_OPE, "Debug_status_%d val: 0x%x", i, val);
+		val[1] = ((i+1) < top_reg->num_debug_registers) ?
+			cam_io_r_mb(top_reg->base +
+			top_reg->debug_regs[i+1].offset) : 0;
+		val[2] = ((i+2) < top_reg->num_debug_registers) ?
+			cam_io_r_mb(top_reg->base +
+			top_reg->debug_regs[i+2].offset) : 0;
+
+		CAM_INFO(CAM_OPE, "status[%d-%d] : 0x%x 0x%x 0x%x",
+			i, i+2, val[0], val[1], val[2]);
 	}
 	return 0;
 }

+ 11 - 0
drivers/cam_req_mgr/cam_mem_mgr.c

@@ -166,6 +166,11 @@ int cam_mem_mgr_init(void)
 
 	memset(tbl.bufq, 0, sizeof(tbl.bufq));
 
+	if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) {
+		CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag");
+		return -EINVAL;
+	}
+
 	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
 	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!tbl.bitmap)
@@ -415,6 +420,9 @@ static int cam_mem_util_get_dma_buf(size_t len,
 		return -EINVAL;
 	}
 
+	if (tbl.force_cache_allocs && (!(flags & ION_FLAG_SECURE)))
+		flags |= ION_FLAG_CACHED;
+
 	*buf = ion_alloc(len, heap_id_mask, flags);
 	if (IS_ERR_OR_NULL(*buf))
 		return -ENOMEM;
@@ -442,6 +450,9 @@ static int cam_mem_util_get_dma_buf_fd(size_t len,
 	if (tbl.alloc_profile_enable)
 		CAM_GET_TIMESTAMP(ts1);
 
+	if (tbl.force_cache_allocs && (!(flags & ION_FLAG_SECURE)))
+		flags |= ION_FLAG_CACHED;
+
 	*buf = ion_alloc(len, heap_id_mask, flags);
 	if (IS_ERR_OR_NULL(*buf))
 		return -ENOMEM;

+ 2 - 0
drivers/cam_req_mgr/cam_mem_mgr.h

@@ -70,6 +70,7 @@ struct cam_mem_buf_queue {
  * @dentry: Debugfs entry
  * @alloc_profile_enable: Whether to enable alloc profiling
  * @dbg_buf_idx: debug buffer index to get usecases info
+ * @force_cache_allocs: Force all internal buffer allocations with cache
  */
 struct cam_mem_table {
 	struct mutex m_lock;
@@ -79,6 +80,7 @@ struct cam_mem_table {
 	struct dentry *dentry;
 	bool alloc_profile_enable;
 	size_t dbg_buf_idx;
+	bool force_cache_allocs;
 };
 
 /**

+ 20 - 6
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -950,7 +950,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 
 			apply_req.trigger_point = trigger;
 			CAM_DBG(CAM_REQ,
-				"SEND: %d link_hdl: %x pd %d req_id %lld",
+				"SEND:  link_hdl: %x pd %d req_id %lld",
 				link->link_hdl, pd, apply_req.request_id);
 			if (dev->ops && dev->ops->apply_req) {
 				rc = dev->ops->apply_req(&apply_req);
@@ -1334,6 +1334,7 @@ static int __cam_req_mgr_check_sync_req_is_ready(
 	uint64_t sof_timestamp_delta = 0;
 	uint64_t master_slave_diff = 0;
 	bool ready = true, sync_ready = true;
+	int slot_idx_diff = 0;
 
 	if (!sync_link || !link) {
 		CAM_ERR(CAM_CRM, "Sync link null");
@@ -1439,12 +1440,14 @@ static int __cam_req_mgr_check_sync_req_is_ready(
 		return -EAGAIN;
 	}
 
+	slot_idx_diff = (sync_slot_idx - sync_rd_idx + sync_num_slots) %
+		sync_num_slots;
 	if ((sync_link->req.in_q->slot[sync_slot_idx].status !=
 		CRM_SLOT_STATUS_REQ_APPLIED) &&
-		(((sync_slot_idx - sync_rd_idx + sync_num_slots) %
-		sync_num_slots) >= 1) &&
+		((slot_idx_diff > 1) ||
+		((slot_idx_diff == 1) &&
 		(sync_rd_slot->status !=
-		CRM_SLOT_STATUS_REQ_APPLIED)) {
+		CRM_SLOT_STATUS_REQ_APPLIED)))) {
 		CAM_DBG(CAM_CRM,
 			"Req: %lld [other link] not next req to be applied on link: %x",
 			req_id, sync_link->link_hdl);
@@ -2048,9 +2051,11 @@ static void __cam_req_mgr_notify_sof_freeze(
 static int __cam_req_mgr_process_sof_freeze(void *priv, void *data)
 {
 	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_req_queue    *in_q = NULL;
 	struct cam_req_mgr_core_session *session = NULL;
 	struct cam_req_mgr_message       msg;
 	int rc = 0;
+	int64_t last_applied_req_id = -EINVAL;
 
 	if (!data || !priv) {
 		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
@@ -2064,6 +2069,13 @@ static int __cam_req_mgr_process_sof_freeze(void *priv, void *data)
 		return -EINVAL;
 	}
 
+	in_q = link->req.in_q;
+	if (in_q) {
+		mutex_lock(&link->req.lock);
+		last_applied_req_id = in_q->slot[in_q->last_applied_idx].req_id;
+		mutex_unlock(&link->req.lock);
+	}
+
 	spin_lock_bh(&link->link_state_spin_lock);
 	if ((link->watchdog) && (link->watchdog->pause_timer)) {
 		CAM_INFO(CAM_CRM, "Watchdog Paused");
@@ -2072,8 +2084,10 @@ static int __cam_req_mgr_process_sof_freeze(void *priv, void *data)
 	}
 	spin_unlock_bh(&link->link_state_spin_lock);
 
-	CAM_ERR(CAM_CRM, "SOF freeze for session %d link 0x%x",
-		session->session_hdl, link->link_hdl);
+	CAM_ERR(CAM_CRM,
+		"SOF freeze for session: %d link: 0x%x max_pd: %d last_req_id:%d",
+		session->session_hdl, link->link_hdl, link->max_delay,
+		last_applied_req_id);
 
 	__cam_req_mgr_notify_sof_freeze(link);
 	memset(&msg, 0, sizeof(msg));

+ 51 - 104
drivers/cam_sensor_module/cam_cci/cam_cci_core.c

@@ -609,19 +609,42 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
 	struct cam_cci_clk_params_t *clk_params = NULL;
 	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
 	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
-	struct cam_hw_soc_info *soc_info =
-		&cci_dev->soc_info;
-	void __iomem *base = soc_info->reg_map[0].mem_base;
+	void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base;
+	struct cam_cci_master_info *cci_master =
+		&cci_dev->cci_master_info[master];
 
 	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
 		CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
 		return -EINVAL;
 	}
+	/*
+	 * If no change in i2c freq, then acquire semaphore only for the first
+	 * i2c transaction to indicate I2C transaction is in progress, else
+	 * always try to acquire semaphore, to make sure that no other I2C
+	 * transaction is in progress.
+	 */
+	mutex_lock(&cci_master->mutex);
+	if (i2c_freq_mode == cci_dev->i2c_freq_mode[master]) {
+		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d", master,
+			i2c_freq_mode);
+		spin_lock(&cci_master->freq_cnt_lock);
+		if (cci_master->freq_ref_cnt == 0)
+			down(&cci_master->master_sem);
+		cci_master->freq_ref_cnt++;
+		spin_unlock(&cci_master->freq_cnt_lock);
+		mutex_unlock(&cci_master->mutex);
+		return 0;
+	}
+	CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
+		master, cci_dev->i2c_freq_mode[master], i2c_freq_mode);
+	down(&cci_master->master_sem);
+
+	spin_lock(&cci_master->freq_cnt_lock);
+	cci_master->freq_ref_cnt++;
+	spin_unlock(&cci_master->freq_cnt_lock);
 
 	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
 
-	if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
-		return 0;
 	if (master == MASTER_0) {
 		cam_io_w_mb(clk_params->hw_thigh << 16 |
 			clk_params->hw_tlow,
@@ -655,6 +678,7 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
 	}
 	cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
 
+	mutex_unlock(&cci_master->mutex);
 	return 0;
 }
 
@@ -938,42 +962,19 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
 		return -EINVAL;
 	}
 
-	soc_info = &cci_dev->soc_info;
-	base = soc_info->reg_map[0].mem_base;
-
-	mutex_lock(&cci_dev->cci_master_info[master].mutex);
-	if (cci_dev->cci_master_info[master].is_first_req) {
-		cci_dev->cci_master_info[master].is_first_req = false;
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else if (c_ctrl->cci_info->i2c_freq_mode
-		!= cci_dev->i2c_freq_mode[master]) {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-		cci_dev->cci_master_info[master].freq_ref_cnt++;
-		spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
-	}
-
 	/* Set the I2C Frequency */
 	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
 	if (rc < 0) {
 		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
-		mutex_unlock(&cci_dev->cci_master_info[master].mutex);
-		goto rel_master;
+		return rc;
 	}
-	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
 
 	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
 	reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]);
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
 	/*
 	 * Call validate queue to make sure queue is empty before starting.
 	 * If this call fails, don't proceed with i2c_read call. This is to
@@ -1198,13 +1199,11 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
 
 rel_mutex_q:
 	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
-rel_master:
-	spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-	if (cci_dev->cci_master_info[master].freq_ref_cnt == 0)
+
+	spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock);
+	if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0)
 		up(&cci_dev->cci_master_info[master].master_sem);
-	else
-		cci_dev->cci_master_info[master].freq_ref_cnt--;
-	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
+	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock);
 	return rc;
 }
 
@@ -1234,42 +1233,19 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
 		return -EINVAL;
 	}
 
-	soc_info = &cci_dev->soc_info;
-	base = soc_info->reg_map[0].mem_base;
-
-	mutex_lock(&cci_dev->cci_master_info[master].mutex);
-	if (cci_dev->cci_master_info[master].is_first_req) {
-		cci_dev->cci_master_info[master].is_first_req = false;
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else if (c_ctrl->cci_info->i2c_freq_mode
-		!= cci_dev->i2c_freq_mode[master]) {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-		cci_dev->cci_master_info[master].freq_ref_cnt++;
-		spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
-	}
-
 	/* Set the I2C Frequency */
 	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
 	if (rc < 0) {
-		mutex_unlock(&cci_dev->cci_master_info[master].mutex);
 		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
-		goto rel_master;
+		return rc;
 	}
-	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
 
 	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
 	reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]);
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
 	/*
 	 * Call validate queue to make sure queue is empty before starting.
 	 * If this call fails, don't proceed with i2c_read call. This is to
@@ -1425,13 +1401,11 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
 	}
 rel_mutex_q:
 	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
-rel_master:
-	spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-	if (cci_dev->cci_master_info[master].freq_ref_cnt == 0)
+
+	spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock);
+	if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0)
 		up(&cci_dev->cci_master_info[master].master_sem);
-	else
-		cci_dev->cci_master_info[master].freq_ref_cnt--;
-	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
+	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock);
 	return rc;
 }
 
@@ -1455,37 +1429,12 @@ static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd,
 		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
 		c_ctrl->cci_info->id_map);
 
-	mutex_lock(&cci_dev->cci_master_info[master].mutex);
-	if (cci_dev->cci_master_info[master].is_first_req) {
-		cci_dev->cci_master_info[master].is_first_req = false;
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else if (c_ctrl->cci_info->i2c_freq_mode
-		!= cci_dev->i2c_freq_mode[master]) {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		down(&cci_dev->cci_master_info[master].master_sem);
-	} else {
-		CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d",
-			master, cci_dev->i2c_freq_mode[master],
-			c_ctrl->cci_info->i2c_freq_mode);
-		spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-		cci_dev->cci_master_info[master].freq_ref_cnt++;
-		spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
-	}
-
 	/* Set the I2C Frequency */
 	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
 	if (rc < 0) {
 		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
-		mutex_unlock(&cci_dev->cci_master_info[master].mutex);
-		goto ERROR;
+		return rc;
 	}
-	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
-
 	reinit_completion(&cci_dev->cci_master_info[master].report_q[queue]);
 	/*
 	 * Call validate queue to make sure queue is empty before starting.
@@ -1515,12 +1464,10 @@ static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd,
 	}
 
 ERROR:
-	spin_lock(&cci_dev->cci_master_info[master].freq_cnt);
-	if (cci_dev->cci_master_info[master].freq_ref_cnt == 0)
+	spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock);
+	if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0)
 		up(&cci_dev->cci_master_info[master].master_sem);
-	else
-		cci_dev->cci_master_info[master].freq_ref_cnt--;
-	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt);
+	spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock);
 	return rc;
 }
 

+ 1 - 2
drivers/cam_sensor_module/cam_cci/cam_cci_dev.h

@@ -132,9 +132,8 @@ struct cam_cci_master_info {
 	struct completion report_q[NUM_QUEUES];
 	atomic_t done_pending[NUM_QUEUES];
 	spinlock_t lock_q[NUM_QUEUES];
-	spinlock_t freq_cnt;
 	struct semaphore master_sem;
-	bool is_first_req;
+	spinlock_t freq_cnt_lock;
 	uint16_t freq_ref_cnt;
 	bool is_initilized;
 };

+ 2 - 2
drivers/cam_sensor_module/cam_cci/cam_cci_soc.c

@@ -219,11 +219,11 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
 
 	for (i = 0; i < MASTER_MAX; i++) {
 		new_cci_dev->cci_master_info[i].status = 0;
-		new_cci_dev->cci_master_info[i].is_first_req = true;
+		new_cci_dev->cci_master_info[i].freq_ref_cnt = 0;
 		new_cci_dev->cci_master_info[i].is_initilized = false;
 		mutex_init(&new_cci_dev->cci_master_info[i].mutex);
 		sema_init(&new_cci_dev->cci_master_info[i].master_sem, 1);
-		spin_lock_init(&new_cci_dev->cci_master_info[i].freq_cnt);
+		spin_lock_init(&new_cci_dev->cci_master_info[i].freq_cnt_lock);
 		init_completion(
 			&new_cci_dev->cci_master_info[i].reset_complete);
 		init_completion(

+ 171 - 3
drivers/cam_smmu/cam_smmu_api.c

@@ -62,6 +62,12 @@ struct cam_smmu_work_payload {
 	struct list_head list;
 };
 
+enum cam_io_coherency_mode {
+	CAM_SMMU_NO_COHERENCY,
+	CAM_SMMU_DMA_COHERENT,
+	CAM_SMMU_DMA_COHERENT_HINT_CACHED,
+};
+
 enum cam_protection_type {
 	CAM_PROT_INVALID,
 	CAM_NON_SECURE,
@@ -155,6 +161,7 @@ struct cam_context_bank_info {
 	bool is_mul_client;
 	int device_count;
 	int num_shared_hdl;
+	enum cam_io_coherency_mode coherency_mode;
 
 	/* discard iova - non-zero values are valid */
 	dma_addr_t discard_iova_start;
@@ -175,6 +182,7 @@ struct cam_iommu_cb_set {
 	struct dentry *dentry;
 	bool cb_dump_enable;
 	bool map_profile_enable;
+	bool force_cache_allocs;
 };
 
 static const struct of_device_id msm_cam_smmu_dt_match[] = {
@@ -357,6 +365,111 @@ static void cam_smmu_dump_monitor_array(
 	}
 }
 
+int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached)
+{
+	int idx;
+	uint32_t curr_mode = 0, final_mode = 0;
+	bool validate = false;
+
+	if (!force_alloc_cached) {
+		CAM_ERR(CAM_SMMU, "Invalid arg");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_SMMU, "force_cache_allocs=%d",
+		iommu_cb_set.force_cache_allocs);
+
+	/*
+	 * First validate whether all SMMU CBs are properly setup to comply with
+	 * iommu_cb_set.force_alloc_cached flag.
+	 * This helps as a validation check to make sure a valid DT combination
+	 * is set for a given chipset.
+	 */
+	for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+		/* ignore secure cb for now. need to revisit */
+		if (iommu_cb_set.cb_info[idx].is_secure)
+			continue;
+
+		curr_mode = iommu_cb_set.cb_info[idx].coherency_mode;
+
+		/*
+		 * 1. No coherency:
+		 *    We can map both CACHED and UNCACHED buffers into same CB.
+		 *    We need to allocate UNCACHED buffers for Cmdbuffers
+		 *    and Shared Buffers. UNCACHED support must exist with memory
+		 *    allocators (ion or dma-buf-heaps) for CmdBuffers,
+		 *    SharedBuffers to work - as it is difficult to do
+		 *    cache operations on these buffers in camera design.
+		 *    ImageBuffers can be CACHED or UNCACHED. If CACHED, clients
+		 *    need to make required CACHE operations.
+		 *    Cannot force all allocations to CACHE.
+		 * 2. dma-coherent:
+		 *    We cannot map CACHED and UNCACHED buffers into the same CB
+		 *    This means, we must force allocate all buffers to be
+		 *    CACHED.
+		 * 3. dma-coherent-hint-cached
+		 *    We can map both CACHED and UNCACHED buffers into the same
+		 *    CB. So any option is fine for force_cache_allocs.
+		 *    Forcing to cache is preferable though.
+		 *
+		 * Other rule we are enforcing is - all camera CBs (except
+		 * secure CB) must have same coherency mode set. Assume one CB
+		 * is having no_coherency mode and other CB is having
+		 * dma_coherent. For no_coherency CB to work - we must not force
+		 * buffers to be CACHE (exa cmd buffers), for dma_coherent mode
+		 * we must force all buffers to be CACHED. But at the time of
+		 * allocation, we don't know to which CB we will be mapping this
+		 * buffer. So it becomes difficult to generalize cache
+		 * allocations and io coherency mode that we want to support.
+		 * So, to simplify, all camera CBs will have same mode.
+		 */
+
+		CAM_DBG(CAM_SMMU, "[%s] : curr_mode=%d",
+			iommu_cb_set.cb_info[idx].name[0], curr_mode);
+
+		if (curr_mode == CAM_SMMU_NO_COHERENCY) {
+			if (iommu_cb_set.force_cache_allocs) {
+				CAM_ERR(CAM_SMMU,
+					"[%s] Can't force alloc cache with no coherency",
+					iommu_cb_set.cb_info[idx].name[0]);
+				return -EINVAL;
+			}
+		} else if (curr_mode == CAM_SMMU_DMA_COHERENT) {
+			if (!iommu_cb_set.force_cache_allocs) {
+				CAM_ERR(CAM_SMMU,
+					"[%s] Must force cache allocs for dma coherent device",
+					iommu_cb_set.cb_info[idx].name[0]);
+				return -EINVAL;
+			}
+		}
+
+		if (validate) {
+			if (curr_mode !=  final_mode) {
+				CAM_ERR(CAM_SMMU,
+					"[%s] CBs having different coherency modes final=%d, curr=%d",
+					iommu_cb_set.cb_info[idx].name[0],
+					final_mode, curr_mode);
+				return -EINVAL;
+			}
+		} else {
+			validate = true;
+			final_mode = curr_mode;
+		}
+	}
+
+	/*
+	 * To be more accurate - if this flag is TRUE and if this buffer will
+	 * be mapped to external devices like CVP - we need to ensure we do
+	 * one of below :
+	 * 1. CVP CB having dma-coherent or dma-coherent-hint-cached
+	 * 2. camera/cvp sw layers properly doing required cache operations. We
+	 *    cannot anymore assume these buffers (camera <--> cvp) are uncached
+	 */
+	*force_alloc_cached = iommu_cb_set.force_cache_allocs;
+
+	return 0;
+}
+
 static void cam_smmu_page_fault_work(struct work_struct *work)
 {
 	int j;
@@ -1352,6 +1465,14 @@ int cam_smmu_alloc_firmware(int32_t smmu_hdl,
 			icp_fw.fw_kva, (void *)icp_fw.fw_hdl);
 
 	domain = iommu_cb_set.cb_info[idx].domain;
+
+	/*
+	 * Revisit this - what should we map this with - CACHED or UNCACHED?
+	 * chipsets using dma-coherent-hint-cached - leaving it like this is
+	 * fine as we can map both CACHED and UNCACHED on same CB.
+	 * But on chipsets which use dma-coherent - all the buffers that are
+	 * being mapped to this CB must be CACHED
+	 */
 	rc = iommu_map(domain,
 		firmware_start,
 		(phys_addr_t) icp_fw.fw_hdl,
@@ -1492,6 +1613,14 @@ int cam_smmu_alloc_qdss(int32_t smmu_hdl,
 	CAM_DBG(CAM_SMMU, "QDSS area len from DT = %zu", qdss_len);
 
 	domain = iommu_cb_set.cb_info[idx].domain;
+
+	/*
+	 * Revisit this - what should we map this with - CACHED or UNCACHED?
+	 * chipsets using dma-coherent-hint-cached - leaving it like this is
+	 * fine as we can map both CACHED and UNCACHED on same CB.
+	 * But on chipsets which use dma-coherent - all the buffers that are
+	 * being mapped to this CB must be CACHED
+	 */
 	rc = iommu_map(domain,
 		qdss_start,
 		qdss_phy_addr,
@@ -1721,6 +1850,7 @@ int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
 	size_t sec_heap_iova_len = 0;
 	int idx;
 	int rc = 0;
+	int prot = 0;
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
@@ -1771,11 +1901,16 @@ int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
 
 	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
 	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+
+	prot = IOMMU_READ | IOMMU_WRITE;
+	if (iommu_cb_set.force_cache_allocs)
+		prot |= IOMMU_CACHE;
+
 	size = iommu_map_sg(iommu_cb_set.cb_info[idx].domain,
 		sec_heap_iova,
 		secheap_buf->table->sgl,
 		secheap_buf->table->nents,
-		IOMMU_READ | IOMMU_WRITE);
+		prot);
 	if (size != sec_heap_iova_len) {
 		CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
 		goto err_unmap_sg;
@@ -1868,6 +2003,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	int rc = 0;
 	struct timespec64 ts1, ts2;
 	long microsec = 0;
+	int prot = 0;
 
 	if (IS_ERR_OR_NULL(buf)) {
 		rc = PTR_ERR(buf);
@@ -1918,8 +2054,12 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 			goto err_unmap_sg;
 		}
 
+		prot = IOMMU_READ | IOMMU_WRITE;
+		if (iommu_cb_set.force_cache_allocs)
+			prot |= IOMMU_CACHE;
+
 		size = iommu_map_sg(domain, iova, table->sgl, table->nents,
-				IOMMU_READ | IOMMU_WRITE);
+				prot);
 
 		if (size < 0) {
 			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
@@ -2415,6 +2555,9 @@ static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
 		goto err_iommu_map;
 	}
 
+	if (iommu_cb_set.force_cache_allocs)
+		iommu_dir |= IOMMU_CACHE;
+
 	if (iommu_map_sg(domain,
 		iova,
 		table->sgl,
@@ -3880,6 +4023,7 @@ static int cam_populate_smmu_context_banks(struct device *dev,
 	struct cam_context_bank_info *cb;
 	struct device *ctx = NULL;
 	int i = 0;
+	bool dma_coherent, dma_coherent_hint;
 
 	if (!dev) {
 		CAM_ERR(CAM_SMMU, "Error: Invalid device");
@@ -3944,6 +4088,27 @@ static int cam_populate_smmu_context_banks(struct device *dev,
 	ctx = dev;
 	CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name[0]);
 
+	cb->coherency_mode = CAM_SMMU_NO_COHERENCY;
+
+	dma_coherent = of_property_read_bool(dev->of_node, "dma-coherent");
+	dma_coherent_hint = of_property_read_bool(dev->of_node,
+		"dma-coherent-hint-cached");
+
+	if (dma_coherent && dma_coherent_hint) {
+		CAM_ERR(CAM_SMMU,
+			"[%s] : Cannot enable both dma-coherent and dma-coherent-hint-cached",
+			cb->name[0]);
+		return -EBADR;
+	}
+
+	if (dma_coherent)
+		cb->coherency_mode = CAM_SMMU_DMA_COHERENT;
+	else if (dma_coherent_hint)
+		cb->coherency_mode = CAM_SMMU_DMA_COHERENT_HINT_CACHED;
+
+	CAM_DBG(CAM_SMMU, "[%s] : io cohereny mode %d", cb->name[0],
+		cb->coherency_mode);
+
 	rc = cam_smmu_setup_cb(cb, ctx);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s",
@@ -3998,7 +4163,7 @@ static int cam_smmu_create_debug_fs(void)
 		iommu_cb_set.dentry, &iommu_cb_set.map_profile_enable);
 	if (IS_ERR(dbgfileptr)) {
 		if (PTR_ERR(dbgfileptr) == -ENODEV)
-			CAM_WARN(CAM_MEM, "DebugFS not enabled in kernel!");
+			CAM_WARN(CAM_SMMU, "DebugFS not enabled in kernel!");
 		else
 			rc = PTR_ERR(dbgfileptr);
 	}
@@ -4098,6 +4263,9 @@ static int cam_smmu_component_bind(struct device *dev,
 	INIT_LIST_HEAD(&iommu_cb_set.payload_list);
 	cam_smmu_create_debug_fs();
 
+	iommu_cb_set.force_cache_allocs =
+		of_property_read_bool(dev->of_node, "force_cache_allocs");
+
 	CAM_DBG(CAM_SMMU, "Main component bound successfully");
 	return 0;
 }

+ 6 - 0
drivers/cam_smmu/cam_smmu_api.h

@@ -432,4 +432,10 @@ int cam_smmu_init_module(void);
  */
 void cam_smmu_exit_module(void);
 
+/**
+ * @brief : API to determine whether to force all allocations to CACHED
+ */
+int cam_smmu_need_force_alloc_cached(bool *force_alloc_cached);
+
+
 #endif /* _CAM_SMMU_API_H_ */