Browse Source

FF: Upto snap456: 'quic/camera-kernel.lnx.4.0' into 5.0 11/24/20

* origin/camera-kernel.lnx.4.0:
  msm: camera: isp: Fix for NULL dereference for failure case of dual IFE
  msm: camera: sensor: Reset actuator flush requestId
  msm: camera: reqmgr: Rate limit workq delay warning
  msm: camera: icp: Check HW state prior to IRQ processing
  msm: camera: reqmgr: update timer even after skip frame
  msm: camera: reqmgr: Add checks before reporting the error
  msm: camera: csiphy: CSIPHY DPHY DeSkew Calibration
  msm: camera: utils: Rate limit error log in case of invalid handles
  msm: camera: isp: Handle buf_done without request in active_list
  msm: camera: cpas: Update jpeg qosgen settings
  msm: camera: cdm: Add more registers in cdm dump
  msm: camera: ife: Fix Constraint error print
  msm: camera: isp: Bubble re-apply with CDM callback detect
  msm: camera: isp: Add trigger id for EOF event
  msm: camera: isp: Add checks to ensure valid out resource info in dump
  msm: camera: cpas: Enhance cpas dump with more info
  msm: camera: reqmgr: Remove idx check when detecting congestion
  msm: camera: isp: Fixing KW errors
  msm: camera: custom: Add support for bubble handling
  msm: camera: isp: Handle QCFA bin enabled case for MP limit fuse
  msm: camera: isp: Move the CSID irq logs to bottom half
  msm: camera: ife: Fix bus print dimension
  msm: camera: tfe: Enable the tfe diag debug feature
  msm: camera: isp: Wait for cdm submit incase of custom HW
  msm: camera: isp: Change dual IFE acquire sequence
  msm: camera: isp: Validate the ife bw blob data
  msm: camera: reqmgr: Fix timing issue while destroying the session
  msm: camera: isp: Fix the callback arguments in IFE
  msm: camera: reqmgr: Move skip frame log to rate limit for WQ congestion
  msm: camera: smmu: Avoid NULL dereference in cam_smmu_alloc_firmware
  msm: camera: isp: Skip qtimer shutter for invalid req_id

Change-Id: I422bafdb6349b08f911acb6bf3c9245cf7e03d3f
Signed-off-by: Abhijit Trivedi <[email protected]>
Abhijit Trivedi cách đây 4 năm
mục cha
commit
4cfd29c06d
38 tập tin đã thay đổi với 1134 bổ sung và 343 xóa
  1. 18 1
      drivers/cam_cdm/cam_cdm_hw_core.c
  2. 2 2
      drivers/cam_core/cam_context_utils.c
  3. 11 8
      drivers/cam_core/cam_hw_mgr_intf.h
  4. 18 9
      drivers/cam_core/cam_node.c
  5. 69 5
      drivers/cam_cpas/cam_cpas_hw.c
  6. 7 1
      drivers/cam_cpas/cam_cpas_hw.h
  7. 1 0
      drivers/cam_cpas/camss_top/cam_camsstop_hw.c
  8. 36 0
      drivers/cam_cpas/cpas_top/cam_cpastop_hw.c
  9. 2 2
      drivers/cam_cpas/cpas_top/cpastop_v570_200.h
  10. 157 25
      drivers/cam_cust/cam_custom_context.c
  11. 7 1
      drivers/cam_cust/cam_custom_context.h
  12. 18 0
      drivers/cam_icp/icp_hw/a5_hw/a5_core.c
  13. 270 18
      drivers/cam_isp/cam_isp_context.c
  14. 33 15
      drivers/cam_isp/cam_isp_context.h
  15. 86 40
      drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
  16. 2 0
      drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
  17. 61 17
      drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c
  18. 2 0
      drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h
  19. 6 1
      drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
  20. 8 5
      drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
  21. 110 61
      drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
  22. 3 2
      drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c
  23. 1 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c
  24. 2 2
      drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c
  25. 10 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h
  26. 87 15
      drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c
  27. 7 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h
  28. 39 19
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
  29. 2 0
      drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
  30. 39 31
      drivers/cam_req_mgr/cam_req_mgr_core.c
  31. 4 2
      drivers/cam_req_mgr/cam_req_mgr_core.h
  32. 10 8
      drivers/cam_req_mgr/cam_req_mgr_util.c
  33. 1 1
      drivers/cam_req_mgr/cam_req_mgr_workq.c
  34. 1 0
      drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c
  35. 1 31
      drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_3_hwreg.h
  36. 1 19
      drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_5_hwreg.h
  37. 1 1
      drivers/cam_sensor_module/cam_flash/cam_flash_core.c
  38. 1 1
      drivers/cam_smmu/cam_smmu_api.c

+ 18 - 1
drivers/cam_cdm/cam_cdm_hw_core.c

@@ -354,7 +354,24 @@ void cam_hw_cdm_dump_core_debug_registers(struct cam_hw_info *cdm_hw,
 			cam_cdm_read_hw_reg(cdm_hw,
 				inv_cmd_log->data_regs->icl_inv_data,
 				&dump_reg);
-			CAM_INFO(CAM_CDM, "Last Inv cmd: 0x%x", dump_reg);
+			CAM_INFO(CAM_CDM, "First word of Last Inv cmd: 0x%x",
+				dump_reg);
+
+			cam_cdm_read_hw_reg(cdm_hw,
+				inv_cmd_log->data_regs->icl_last_data_0,
+				&dump_reg);
+			CAM_INFO(CAM_CDM, "First word of Last Good cmd: 0x%x",
+				dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw,
+				inv_cmd_log->data_regs->icl_last_data_1,
+				&dump_reg);
+			CAM_INFO(CAM_CDM, "Second word of Last Good cmd: 0x%x",
+				dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw,
+				inv_cmd_log->data_regs->icl_last_data_2,
+				&dump_reg);
+			CAM_INFO(CAM_CDM, "Third word of Last Good cmd: 0x%x",
+				dump_reg);
 		}
 	}
 

+ 2 - 2
drivers/cam_core/cam_context_utils.c

@@ -535,8 +535,8 @@ int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
 		cmd->resource_hdl);
 
 	if (cmd->num_resources > CAM_CTX_RES_MAX) {
-		CAM_ERR(CAM_CTXT, "[%s][%d] resource limit exceeded",
-			ctx->dev_name, ctx->ctx_id);
+		CAM_ERR(CAM_CTXT, "[%s][%d] resource[%d] limit exceeded",
+			ctx->dev_name, ctx->ctx_id, cmd->num_resources);
 		rc = -ENOMEM;
 		goto end;
 	}

+ 11 - 8
drivers/cam_core/cam_hw_mgr_intf.h

@@ -243,14 +243,16 @@ struct cam_hw_stream_setttings {
 /**
  * struct cam_hw_config_args - Payload for config command
  *
- * @ctxt_to_hw_map:        HW context from the acquire
- * @num_hw_update_entries: Number of hardware update entries
- * @hw_update_entries:     Hardware update list
- * @out_map_entries:       Out map info
- * @num_out_map_entries:   Number of out map entries
- * @priv:                  Private pointer
- * @request_id:            Request ID
- * @reapply                True if reapplying after bubble
+ * @ctxt_to_hw_map:            HW context from the acquire
+ * @num_hw_update_entries:     Number of hardware update entries
+ * @hw_update_entries:         Hardware update list
+ * @out_map_entries:           Out map info
+ * @num_out_map_entries:       Number of out map entries
+ * @priv:                      Private pointer
+ * @request_id:                Request ID
+ * @reapply:                   True if reapplying after bubble
+ * @cdm_reset_before_apply:    True is need to reset CDM before re-apply bubble
+ *                             request
  *
  */
 struct cam_hw_config_args {
@@ -263,6 +265,7 @@ struct cam_hw_config_args {
 	uint64_t                        request_id;
 	bool                            init_packet;
 	bool                            reapply;
+	bool                            cdm_reset_before_apply;
 };
 
 /**

+ 18 - 9
drivers/cam_core/cam_node.c

@@ -346,31 +346,36 @@ static int __cam_node_handle_flush_dev(struct cam_node *node,
 		return -EINVAL;
 
 	if (flush->dev_handle <= 0) {
-		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (flush->session_handle <= 0) {
-		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
 	if (!ctx) {
-		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Can not get context for handle %d",
 			flush->dev_handle);
 		return -EINVAL;
 	}
 
 	if (strcmp(node->name, ctx->dev_name)) {
-		CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"node name %s dev name:%s not matching",
 			node->name, ctx->dev_name);
 		return -EINVAL;
 	}
 
 	rc = cam_context_handle_flush_dev(ctx, flush);
 	if (rc)
-		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Flush failure for node %s", node->name);
 
 	return rc;
 }
@@ -446,25 +451,29 @@ static int __cam_node_handle_dump_dev(struct cam_node *node,
 		return -EINVAL;
 
 	if (dump->dev_handle <= 0) {
-		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (dump->session_handle <= 0) {
-		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(dump->dev_handle);
 	if (!ctx) {
-		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Can not get context for handle %d",
 			dump->dev_handle);
 		return -EINVAL;
 	}
 
 	rc = cam_context_handle_dump_dev(ctx, dump);
 	if (rc)
-		CAM_ERR(CAM_CORE, "Dump failure for node %s", node->name);
+		CAM_ERR_RATE_LIMIT(CAM_CORE,
+			"Dump failure for node %s", node->name);
 
 	return rc;
 }

+ 69 - 5
drivers/cam_cpas/cam_cpas_hw.c

@@ -77,6 +77,30 @@ static void cam_cpas_process_bw_overrides(
 		bus_client->common_data.name, *ab, *ib, curr_ab, curr_ib);
 }
 
+int cam_cpas_util_reg_read(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (!reg_info->enable)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	value = cam_io_r_mb(
+		soc_info->reg_map[reg_base_index].mem_base + reg_info->offset);
+
+	CAM_INFO(CAM_CPAS, "Base[%d] Offset[0x%08x] Value[0x%08x]",
+		reg_base, reg_info->offset, value);
+
+	return 0;
+}
+
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
 {
@@ -1089,10 +1113,6 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
 	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
 		"Translated Vote", axi_vote);
 
-	/* Log an entry whenever there is an AXI update - before updating */
-	cam_cpas_update_monitor_array(cpas_hw, "CPAS AXI pre-update",
-		client_indx);
-
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
 		cpas_core->cpas_client[client_indx], axi_vote);
 
@@ -1917,6 +1937,11 @@ static int cam_cpas_log_vote(struct cam_hw_info *cpas_hw)
 
 	cam_cpas_dump_monitor_array(cpas_core);
 
+	if (cpas_core->internal_ops.print_poweron_settings)
+		cpas_core->internal_ops.print_poweron_settings(cpas_hw);
+	else
+		CAM_DBG(CAM_CPAS, "No ops for print_poweron_settings");
+
 	return 0;
 }
 
@@ -1930,6 +1955,7 @@ static void cam_cpas_update_monitor_array(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_monitor *entry;
 	int iterator;
 	int i;
+	int reg_camnoc = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
 
 	CAM_CPAS_INC_MONITOR_HEAD(&cpas_core->monitor_head, &iterator);
 
@@ -1985,6 +2011,20 @@ static void cam_cpas_update_monitor_array(struct cam_hw_info *cpas_hw,
 		entry->be_ddr = cam_io_r_mb(rpmh_base + be_ddr_offset);
 		entry->be_mnoc = cam_io_r_mb(rpmh_base + be_mnoc_offset);
 	}
+
+	entry->camnoc_fill_level[0] = cam_io_r_mb(
+		soc_info->reg_map[reg_camnoc].mem_base + 0xA20);
+	entry->camnoc_fill_level[1] = cam_io_r_mb(
+		soc_info->reg_map[reg_camnoc].mem_base + 0x1420);
+	entry->camnoc_fill_level[2] = cam_io_r_mb(
+		soc_info->reg_map[reg_camnoc].mem_base + 0x1A20);
+
+	if (cpas_hw->soc_info.hw_version == CAM_CPAS_TITAN_580_V100) {
+		entry->camnoc_fill_level[3] = cam_io_r_mb(
+			soc_info->reg_map[reg_camnoc].mem_base + 0x7620);
+		entry->camnoc_fill_level[4] = cam_io_r_mb(
+			soc_info->reg_map[reg_camnoc].mem_base + 0x7420);
+	}
 }
 
 static void cam_cpas_dump_monitor_array(
@@ -1995,6 +2035,7 @@ static void cam_cpas_dump_monitor_array(
 	uint32_t index, num_entries, oldest_entry;
 	uint64_t ms, tmp, hrs, min, sec;
 	struct cam_cpas_monitor *entry;
+	struct timespec64 curr_timestamp;
 
 	if (!cpas_core->full_state_dump)
 		return;
@@ -2013,7 +2054,17 @@ static void cam_cpas_dump_monitor_array(
 			CAM_CPAS_MONITOR_MAX_ENTRIES, &oldest_entry);
 	}
 
-	CAM_INFO(CAM_CPAS, "======== Dumping monitor information ===========");
+
+	ktime_get_real_ts64(&curr_timestamp);
+	tmp = curr_timestamp.tv_sec;
+	ms = (curr_timestamp.tv_nsec) / 1000000;
+	sec = do_div(tmp, 60);
+	min = do_div(tmp, 60);
+	hrs = do_div(tmp, 24);
+
+	CAM_INFO(CAM_CPAS,
+		"**** %llu:%llu:%llu.%llu : ======== Dumping monitor information ===========",
+		hrs, min, sec, ms);
 
 	index = oldest_entry;
 
@@ -2048,6 +2099,19 @@ static void cam_cpas_dump_monitor_array(
 				entry->be_ddr, entry->be_mnoc);
 		}
 
+		CAM_INFO(CAM_CPAS,
+			"CAMNOC REG[Queued Pending] linear[%d %d] rdi0_wr[%d %d] ubwc_stats0[%d %d] ubwc_stats1[%d %d] rdi1_wr[%d %d]",
+			(entry->camnoc_fill_level[0] & 0x7FF),
+			(entry->camnoc_fill_level[0] & 0x7F0000) >> 16,
+			(entry->camnoc_fill_level[1] & 0x7FF),
+			(entry->camnoc_fill_level[1] & 0x7F0000) >> 16,
+			(entry->camnoc_fill_level[2] & 0x7FF),
+			(entry->camnoc_fill_level[2] & 0x7F0000) >> 16,
+			(entry->camnoc_fill_level[3] & 0x7FF),
+			(entry->camnoc_fill_level[3] & 0x7F0000) >> 16,
+			(entry->camnoc_fill_level[4] & 0x7FF),
+			(entry->camnoc_fill_level[4] & 0x7F0000) >> 16);
+
 		index = (index + 1) % CAM_CPAS_MONITOR_MAX_ENTRIES;
 	}
 }

+ 7 - 1
drivers/cam_cpas/cam_cpas_hw.h

@@ -45,7 +45,7 @@
 #define CAM_RPMH_BCM_MNOC_INDEX 4
 #define CAM_RPMH_BCM_INFO_MAX   5
 
-#define CAM_CPAS_MONITOR_MAX_ENTRIES   20
+#define CAM_CPAS_MONITOR_MAX_ENTRIES   60
 #define CAM_CPAS_INC_MONITOR_HEAD(head, ret) \
 	div_u64_rem(atomic64_add_return(1, head),\
 	CAM_CPAS_MONITOR_MAX_ENTRIES, (ret))
@@ -69,6 +69,7 @@ enum cam_cpas_access_type {
  * @power_on: Function pointer for hw core specific power on settings
  * @power_off: Function pointer for hw core specific power off settings
  * @setup_qos_settings: Function pointer for hw to select a specific qos header
+ * @print_poweron_settings: Function pointer for hw to print poweron settings
  *
  */
 struct cam_cpas_internal_ops {
@@ -83,6 +84,7 @@ struct cam_cpas_internal_ops {
 	int (*power_off)(struct cam_hw_info *cpas_hw);
 	int (*setup_qos_settings)(struct cam_hw_info *cpas_hw,
 		uint32_t selection_mask);
+	int (*print_poweron_settings)(struct cam_hw_info *cpas_hw);
 };
 
 /**
@@ -212,6 +214,7 @@ struct cam_cpas_axi_port_debug_info {
  *           This indicates requested clock plan
  * @be_mnoc: RPMH MNOC BCM BE (back-end) status register value.
  *           This indicates actual current clock plan
+ * @camnoc_fill_level: Camnoc fill level register info
  */
 struct cam_cpas_monitor {
 	struct timespec64                   timestamp;
@@ -224,6 +227,7 @@ struct cam_cpas_monitor {
 	uint32_t                            be_ddr;
 	uint32_t                            fe_mnoc;
 	uint32_t                            be_mnoc;
+	uint32_t                            camnoc_fill_level[5];
 };
 
 /**
@@ -284,6 +288,8 @@ int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
 
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+int cam_cpas_util_reg_read(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
 
 int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw);
 

+ 1 - 0
drivers/cam_cpas/camss_top/cam_camsstop_hw.c

@@ -78,6 +78,7 @@ int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
 	internal_ops->power_on = NULL;
 	internal_ops->power_off = NULL;
 	internal_ops->setup_qos_settings = NULL;
+	internal_ops->print_poweron_settings = NULL;
 
 	return 0;
 }

+ 36 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.c

@@ -683,6 +683,40 @@ done:
 	return IRQ_HANDLED;
 }
 
+static int cam_cpastop_print_poweron_settings(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			CAM_INFO(CAM_CPAS, "Reading QoS settings for %d",
+				camnoc_info->specific[i].port_type);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].flag_out_set0_low);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].qosgen_mainctl);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].qosgen_shaping_low);
+			cam_cpas_util_reg_read(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].qosgen_shaping_high);
+		}
+	}
+
+	return 0;
+}
+
 static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
 {
 	int i;
@@ -899,6 +933,8 @@ int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
 	internal_ops->power_on = cam_cpastop_poweron;
 	internal_ops->power_off = cam_cpastop_poweroff;
 	internal_ops->setup_qos_settings = cam_cpastop_setup_qos_settings;
+	internal_ops->print_poweron_settings =
+		cam_cpastop_print_poweron_settings;
 
 	return 0;
 }

+ 2 - 2
drivers/cam_cpas/cpas_top/cpastop_v570_200.h

@@ -802,14 +802,14 @@ static struct cam_camnoc_specific
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x2D20, /* JPEG_QOSGEN_SHAPING_LOW */
-			.value = 0x05050505,
+			.value = 0xA0A0A0A,
 		},
 		.qosgen_shaping_high = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x2D24, /* JPEG_QOSGEN_SHAPING_HIGH */
-			.value = 0x05050505,
+			.value = 0xA0A0A0A,
 		},
 	},
 	{

+ 157 - 25
drivers/cam_cust/cam_custom_context.c

@@ -181,11 +181,28 @@ static int __cam_custom_ctx_frame_done(
 			continue;
 		}
 
-		rc = cam_sync_signal(req_custom->fence_map_out[j].sync_id,
+		if (!req_custom->bubble_detected) {
+			rc = cam_sync_signal(
+				req_custom->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
-		if (rc)
-			CAM_ERR(CAM_CUSTOM, "Sync failed with rc = %d", rc);
+			if (rc)
+				CAM_ERR(CAM_CUSTOM,
+					"Sync failed with rc = %d", rc);
+		} else if (!req_custom->bubble_report) {
+			rc = cam_sync_signal(
+				req_custom->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR,
+				CAM_SYNC_ISP_EVENT_BUBBLE);
+			if (rc)
+				CAM_ERR(CAM_CUSTOM,
+					"Sync failed with rc = %d", rc);
+		} else {
+			req_custom->num_acked++;
+			CAM_DBG(CAM_CUSTOM, "frame done with bubble for %llu",
+				req->request_id);
+			continue;
+		}
 
 		req_custom->num_acked++;
 		req_custom->fence_map_out[j].sync_id = -1;
@@ -203,15 +220,131 @@ static int __cam_custom_ctx_frame_done(
 
 	custom_ctx->active_req_cnt--;
 	frame_done_req_id = req->request_id;
-	list_del_init(&req->list);
-	list_add_tail(&req->list, &ctx->free_req_list);
-	CAM_DBG(CAM_REQ,
-		"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
-		frame_done_req_id, custom_ctx->active_req_cnt, ctx->ctx_id);
+	if (req_custom->bubble_detected && req_custom->bubble_report) {
+		req_custom->num_acked = 0;
+		req_custom->bubble_detected = false;
+		list_del_init(&req->list);
+		if (frame_done_req_id <= ctx->last_flush_req) {
+			for (i = 0; i < req_custom->num_fence_map_out; i++)
+				rc = cam_sync_signal(
+					req_custom->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR,
+					CAM_SYNC_ISP_EVENT_BUBBLE);
+
+			list_add_tail(&req->list, &ctx->free_req_list);
+			atomic_set(&custom_ctx->process_bubble, 0);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
+				frame_done_req_id, custom_ctx->active_req_cnt,
+				ctx->ctx_id);
+		} else {
+			list_add(&req->list, &ctx->pending_req_list);
+			atomic_set(&custom_ctx->process_bubble, 0);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to pending list in ctx %u",
+				frame_done_req_id, ctx->ctx_id);
+		}
+	} else {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
+			frame_done_req_id,
+			custom_ctx->active_req_cnt,
+			ctx->ctx_id);
+	}
 
 	return rc;
 }
 
+static int __cam_custom_ctx_handle_bubble(
+	struct cam_context *ctx, uint64_t req_id)
+{
+	int                              rc = -EINVAL;
+	bool                             found = false;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp;
+	struct cam_custom_dev_ctx_req   *req_custom;
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->wait_req_list, list) {
+		if (req->request_id == req_id) {
+			req_custom =
+				(struct cam_custom_dev_ctx_req *)req->req_priv;
+			if (!req_custom->bubble_report) {
+				CAM_DBG(CAM_CUSTOM,
+					"Skip bubble recovery for %llu",
+					req_id);
+				goto end;
+			}
+
+			req_custom->bubble_detected = true;
+			found = true;
+			CAM_DBG(CAM_CUSTOM,
+				"Found bubbled req %llu in wait list",
+				req_id);
+		}
+	}
+
+	if (found) {
+		rc = 0;
+		goto end;
+	}
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		if (req->request_id == req_id) {
+			req_custom =
+				(struct cam_custom_dev_ctx_req *)req->req_priv;
+			if (!req_custom->bubble_report) {
+				CAM_DBG(CAM_CUSTOM,
+					"Skip bubble recovery for %llu",
+					req_id);
+				goto end;
+			}
+
+			req_custom->bubble_detected = true;
+			found = true;
+			CAM_DBG(CAM_CUSTOM,
+				"Found bubbled req %llu in active list",
+				req_id);
+		}
+	}
+
+	if (found)
+		rc = 0;
+	else
+		CAM_ERR(CAM_CUSTOM,
+			"req %llu not found in wait or active list bubble recovery failed ctx: %u",
+			req_id, ctx->ctx_id);
+
+end:
+	return rc;
+}
+
+static int __cam_custom_ctx_handle_evt(
+	struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	int rc = -1;
+	struct cam_custom_context *custom_ctx =
+		(struct cam_custom_context *) ctx->ctx_priv;
+
+	if (evt_data->u.error == CRM_KMD_ERR_BUBBLE) {
+		rc = __cam_custom_ctx_handle_bubble(ctx, evt_data->req_id);
+		if (rc)
+			return rc;
+	} else {
+		CAM_WARN(CAM_CUSTOM, "Unsupported error type %d",
+			evt_data->u.error);
+	}
+
+	CAM_DBG(CAM_CUSTOM, "Set bubble flag for req %llu in ctx %u",
+		evt_data->req_id, ctx->ctx_id);
+	atomic_set(&custom_ctx->process_bubble, 1);
+	return 0;
+}
+
 static struct cam_ctx_ops
 	cam_custom_ctx_activated_state_machine
 	[CAM_CUSTOM_CTX_ACTIVATED_MAX] = {
@@ -487,6 +620,7 @@ static int __cam_custom_ctx_flush_req_in_top_state(
 	}
 
 end:
+	atomic_set(&custom_ctx->process_bubble, 0);
 	return rc;
 }
 
@@ -495,6 +629,8 @@ static int __cam_custom_ctx_flush_req_in_ready(
 	struct cam_req_mgr_flush_request *flush_req)
 {
 	int rc = 0;
+	struct cam_custom_context *custom_ctx =
+		(struct cam_custom_context *) ctx->ctx_priv;
 
 	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
 	spin_lock_bh(&ctx->lock);
@@ -505,6 +641,7 @@ static int __cam_custom_ctx_flush_req_in_ready(
 		ctx->state = CAM_CTX_ACQUIRED;
 	spin_unlock_bh(&ctx->lock);
 
+	atomic_set(&custom_ctx->process_bubble, 0);
 	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
 		 ctx->state);
 	return rc;
@@ -751,6 +888,14 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
 	struct cam_custom_context       *custom_ctx = NULL;
 	struct cam_hw_config_args        cfg;
 
+	if (atomic_read(&custom_ctx->process_bubble)) {
+		CAM_WARN(CAM_CUSTOM,
+			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
+			ctx->ctx_id, apply->request_id);
+		rc = -EAGAIN;
+		goto end;
+	}
+
 	if (list_empty(&ctx->pending_req_list)) {
 		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
 			apply->request_id);
@@ -779,7 +924,7 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
 	}
 
 	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
-
+	req_custom->bubble_report = apply->report_if_bubble;
 	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
 	cfg.request_id = req->request_id;
 	cfg.hw_update_entries = req_custom->cfg;
@@ -1287,6 +1432,7 @@ static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
 
 	ctx_custom->frame_id = 0;
 	ctx_custom->active_req_cnt = 0;
+	atomic_set(&ctx_custom->process_bubble, 0);
 	ctx_custom->substate_activated =
 		(req_custom->num_fence_map_out) ?
 		CAM_CUSTOM_CTX_ACTIVATED_APPLIED :
@@ -1349,21 +1495,6 @@ static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx,
 	return rc;
 }
 
-static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
-	struct cam_req_mgr_link_evt_data *link_evt_data)
-{
-	switch (link_evt_data->evt_type) {
-	case CAM_REQ_MGR_LINK_EVT_ERR:
-		/* Handle error/bubble related issues */
-		break;
-	default:
-		CAM_WARN(CAM_CUSTOM, "Unknown event from CRM");
-		break;
-	}
-
-	return 0;
-}
-
 static int __cam_custom_ctx_handle_irq_in_activated(void *context,
 	uint32_t evt_id, void *evt_data)
 {
@@ -1537,6 +1668,7 @@ static struct cam_ctx_ops
 		},
 		.crm_ops = {
 			.unlink = __cam_custom_ctx_unlink_in_ready,
+			.process_evt = __cam_custom_ctx_handle_evt,
 		},
 		.irq_ops = NULL,
 	},
@@ -1556,7 +1688,7 @@ static struct cam_ctx_ops
 			.notify_frame_skip =
 				__cam_custom_ctx_apply_default_req,
 			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
-			.process_evt = __cam_custom_ctx_process_evt,
+			.process_evt = __cam_custom_ctx_handle_evt,
 		},
 		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
 		.pagefault_ops = NULL,

+ 7 - 1
drivers/cam_cust/cam_custom_context.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only
  *
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CUSTOM_CONTEXT_H_
@@ -65,6 +65,8 @@ struct cam_custom_ctx_irq_ops {
  * @num_acked:             Count to track acked entried for output.
  *                         If count equals the number of fence out, it means
  *                         the request has been completed.
+ * @bubble_report:         If bubble recovery is needed
+ * @bubble_detected:       request has bubbled
  * @hw_update_data:        HW update data for this request
  *
  */
@@ -80,6 +82,8 @@ struct cam_custom_dev_ctx_req {
 						[CAM_CUSTOM_DEV_CTX_RES_MAX];
 	uint32_t                                 num_fence_map_in;
 	uint32_t                                 num_acked;
+	int32_t                                  bubble_report;
+	bool                                     bubble_detected;
 	struct cam_custom_prepare_hw_update_data hw_update_data;
 };
 
@@ -95,6 +99,7 @@ struct cam_custom_dev_ctx_req {
  * @active_req_cnt: Counter for the active request
  * @frame_id: Frame id tracking for the custom context
  * @hw_acquired: Flag to indicate if HW is acquired for this context
+ * @process_bubble: If ctx currently processing bubble
  * @substate_actiavted: Current substate for the activated state.
  * @substate_machine: Custom substate machine for external interface
  * @substate_machine_irq: Custom substate machine for irq handling
@@ -113,6 +118,7 @@ struct cam_custom_context {
 	int64_t                        frame_id;
 	bool                           hw_acquired;
 	uint32_t                       substate_activated;
+	atomic_t                       process_bubble;
 	struct cam_ctx_ops            *substate_machine;
 	struct cam_custom_ctx_irq_ops *substate_machine_irq;
 	struct cam_ctx_request         req_base[CAM_CTX_REQ_MAX];

+ 18 - 0
drivers/cam_icp/icp_hw/a5_hw/a5_core.c

@@ -297,6 +297,7 @@ int cam_a5_init_hw(void *device_priv,
 	struct cam_a5_device_core_info *core_info = NULL;
 	struct a5_soc_info *a5_soc_info;
 	struct cam_icp_cpas_vote cpas_vote;
+	unsigned long flags;
 	int rc = 0;
 
 	if (!device_priv) {
@@ -356,6 +357,10 @@ int cam_a5_init_hw(void *device_priv,
 				ICP_SIERRA_A5_CSR_ACCESS);
 	}
 
+	spin_lock_irqsave(&a5_dev->hw_lock, flags);
+	a5_dev->hw_state = CAM_HW_STATE_POWER_UP;
+	spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
+
 error:
 	return rc;
 }
@@ -366,6 +371,7 @@ int cam_a5_deinit_hw(void *device_priv,
 	struct cam_hw_info *a5_dev = device_priv;
 	struct cam_hw_soc_info *soc_info = NULL;
 	struct cam_a5_device_core_info *core_info = NULL;
+	unsigned long flags;
 	int rc = 0;
 
 	if (!device_priv) {
@@ -381,6 +387,10 @@ int cam_a5_deinit_hw(void *device_priv,
 		return -EINVAL;
 	}
 
+	spin_lock_irqsave(&a5_dev->hw_lock, flags);
+	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
+
 	rc = cam_a5_disable_soc_resources(soc_info);
 	if (rc)
 		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
@@ -462,6 +472,14 @@ irqreturn_t cam_a5_irq(int irq_num, void *data)
 		return IRQ_HANDLED;
 	}
 
+	spin_lock(&a5_dev->hw_lock);
+	if (a5_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
+		CAM_WARN(CAM_ICP, "ICP HW powered off");
+		spin_unlock(&a5_dev->hw_lock);
+		return IRQ_HANDLED;
+	}
+	spin_unlock(&a5_dev->hw_lock);
+
 	soc_info = &a5_dev->soc_info;
 	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
 	hw_info = core_info->a5_hw_info;

+ 270 - 18
drivers/cam_isp/cam_isp_context.c

@@ -20,6 +20,7 @@
 #include "cam_isp_context.h"
 #include "cam_common_util.h"
 #include "cam_req_mgr_debug.h"
+#include "cam_cpas_api.h"
 
 static const char isp_dev_name[] = "cam-isp";
 
@@ -726,8 +727,7 @@ static void __cam_isp_ctx_send_sof_timestamp(
 {
 	struct cam_req_mgr_message   req_msg;
 
-	if ((ctx_isp->use_frame_header_ts) && (request_id) &&
-		(sof_event_status == CAM_REQ_MGR_SOF_EVENT_SUCCESS))
+	if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
 		goto end;
 
 	req_msg.session_hdl = ctx_isp->base->session_hdl;
@@ -820,9 +820,12 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 
 	if (req_isp->bubble_detected && req_isp->bubble_report) {
 		req_isp->num_acked = 0;
+		req_isp->num_deferred_acks = 0;
 		req_isp->bubble_detected = false;
 		list_del_init(&req->list);
 		atomic_set(&ctx_isp->process_bubble, 0);
+		req_isp->cdm_reset_before_apply = false;
+		ctx_isp->bubble_frame_cnt = 0;
 
 		if (buf_done_req_id <= ctx->last_flush_req) {
 			for (i = 0; i < req_isp->num_fence_map_out; i++)
@@ -855,6 +858,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		req_isp->reapply = false;
+		req_isp->cdm_reset_before_apply = false;
 
 		CAM_DBG(CAM_REQ,
 			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
@@ -862,6 +866,8 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
 	}
 
+	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);
+
 	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
 		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
 
@@ -1017,12 +1023,75 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 	return rc;
 }
 
+static int __cam_isp_handle_deferred_buf_done(
+	struct cam_isp_context *ctx_isp,
+	struct cam_ctx_request  *req,
+	bool bubble_handling,
+	uint32_t status, uint32_t event_cause)
+{
+	int i, j;
+	int rc = 0;
+	struct cam_isp_ctx_req *req_isp =
+		(struct cam_isp_ctx_req *) req->req_priv;
+	struct cam_context *ctx = ctx_isp->base;
+
+	CAM_DBG(CAM_ISP,
+		"ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
+		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
+		req_isp->num_acked, bubble_handling);
+
+	for (i = 0; i < req_isp->num_deferred_acks; i++) {
+		j = req_isp->deferred_fence_map_index[i];
+
+		CAM_DBG(CAM_ISP,
+			"ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
+			ctx->ctx_id, status, event_cause,
+			req->request_id,
+			req_isp->fence_map_out[j].resource_handle,
+			req_isp->fence_map_out[j].sync_id);
+
+		if (req_isp->fence_map_out[j].sync_id == -1) {
+			CAM_WARN(CAM_ISP,
+				"ctx[%d] Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
+				ctx->ctx_id, req->request_id, j,
+				req_isp->fence_map_out[j].resource_handle);
+			continue;
+		}
+
+		if (!bubble_handling) {
+			CAM_WARN(CAM_ISP,
+				"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
+				ctx->ctx_id, req->request_id, status,
+				req_isp->fence_map_out[j].resource_handle);
+
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				status, event_cause);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
+					ctx->ctx_id, req->request_id,
+					req_isp->fence_map_out[j].sync_id,
+					status, rc);
+			} else {
+				req_isp->num_acked++;
+				req_isp->fence_map_out[j].sync_id = -1;
+			}
+		} else {
+			req_isp->num_acked++;
+		}
+	}
+
+	req_isp->num_deferred_acks = 0;
+
+	return rc;
+}
 static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 	struct cam_isp_context *ctx_isp,
 	struct cam_ctx_request  *req,
 	struct cam_isp_hw_done_event_data *done,
 	uint32_t bubble_state,
-	bool verify_consumed_addr)
+	bool verify_consumed_addr,
+	bool defer_buf_done)
 {
 	int rc = 0;
 	int i, j;
@@ -1083,7 +1152,32 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			continue;
 		}
 
-		if (!req_isp->bubble_detected) {
+		if (defer_buf_done) {
+			uint32_t deferred_indx = req_isp->num_deferred_acks;
+
+			/*
+			 * If we are handling this BUF_DONE event for a request
+			 * that is still in wait_list, do not signal now,
+			 * instead mark it as done and handle it later -
+			 * if this request is going into BUBBLE state later
+			 * it will automatically be re-applied. If this is not
+			 * going into BUBBLE, signal fences later.
+			 * Note - we will come here only if the last consumed
+			 * address matches with this port's buffer.
+			 */
+			req_isp->deferred_fence_map_index[deferred_indx] = j;
+			req_isp->num_deferred_acks++;
+			CAM_WARN(CAM_ISP,
+				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
+				ctx->ctx_id, req->request_id, bubble_state,
+				req_isp->bubble_report);
+			CAM_WARN(CAM_ISP,
+				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
+				ctx->ctx_id, req_isp->num_deferred_acks, j,
+				req_isp->fence_map_out[j].resource_handle,
+				req_isp->fence_map_out[j].sync_id);
+			continue;
+		} else if (!req_isp->bubble_detected) {
 			CAM_DBG(CAM_ISP,
 				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
 				req->request_id,
@@ -1094,9 +1188,16 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
-			if (rc)
+			if (rc) {
 				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
 					 rc);
+			} else if (req_isp->num_deferred_acks) {
+				/* Process deferred buf_done acks */
+				__cam_isp_handle_deferred_buf_done(ctx_isp,
+					req, false,
+					CAM_SYNC_STATE_SIGNALED_SUCCESS,
+					CAM_SYNC_COMMON_EVENT_SUCCESS);
+			}
 		} else if (!req_isp->bubble_report) {
 			CAM_DBG(CAM_ISP,
 				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
@@ -1108,9 +1209,16 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR,
 				CAM_SYNC_ISP_EVENT_BUBBLE);
-			if (rc)
+			if (rc) {
 				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
 					rc);
+			} else if (req_isp->num_deferred_acks) {
+				/* Process deferred buf_done acks */
+				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
+					false,
+					CAM_SYNC_STATE_SIGNALED_ERROR,
+					CAM_SYNC_ISP_EVENT_BUBBLE);
+			}
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
@@ -1122,6 +1230,14 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			CAM_DBG(CAM_ISP,
 				"buf done with bubble state %d recovery %d",
 				bubble_state, req_isp->bubble_report);
+				/* Process deferred buf_done acks */
+
+			if (req_isp->num_deferred_acks)
+				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
+					true,
+					CAM_SYNC_STATE_SIGNALED_ERROR,
+					CAM_SYNC_ISP_EVENT_BUBBLE);
+
 			continue;
 		}
 
@@ -1169,7 +1285,7 @@ static int __cam_isp_ctx_handle_buf_done(
 	struct cam_isp_hw_done_event_data done_next_req;
 
 	if (list_empty(&ctx->active_req_list)) {
-		CAM_DBG(CAM_ISP, "Buf done with no active request");
+		CAM_WARN(CAM_ISP, "Buf done with no active request");
 		return 0;
 	}
 
@@ -1269,7 +1385,31 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
 	struct cam_context *ctx = ctx_isp->base;
 
 	if (list_empty(&ctx->active_req_list)) {
-		CAM_DBG(CAM_ISP, "Buf done with no active request");
+		CAM_WARN(CAM_ISP,
+			"Buf done with no active request bubble_state=%d",
+			bubble_state);
+
+		if (!list_empty(&ctx->wait_req_list)) {
+			struct cam_isp_ctx_req *req_isp;
+
+			req = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+			CAM_WARN(CAM_ISP,
+				"Buf done with no active request but with req in wait list, req %llu",
+				req->request_id);
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+			/*
+			 * Verify consumed address for this request to make sure
+			 * we are handling the buf_done for the correct
+			 * buffer. Also defer actual buf_done handling, i.e
+			 * do not signal the fence as this request may go into
+			 * Bubble state eventually.
+			 */
+			rc =
+			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
+				ctx_isp, req, done, bubble_state, true, true);
+		}
 		return 0;
 	}
 
@@ -1297,7 +1437,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
 	 */
 	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 		ctx_isp, req, done, bubble_state,
-		!irq_delay_detected);
+		!irq_delay_detected, false);
 
 	/*
 	 * Verify the consumed address for next req all the time,
@@ -1307,7 +1447,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
 	if (!rc && irq_delay_detected)
 		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			ctx_isp, next_req, done,
-			bubble_state, true);
+			bubble_state, true, false);
 
 	return rc;
 }
@@ -1390,6 +1530,7 @@ static int __cam_isp_ctx_apply_req_offline(
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
 	prev_applied_req = ctx_isp->last_applied_req_id;
 	ctx_isp->last_applied_req_id = req->request_id;
+	atomic_set(&ctx_isp->apply_in_progress, 1);
 
 	list_del_init(&req->list);
 	list_add_tail(&req->list, &ctx->wait_req_list);
@@ -1403,12 +1544,14 @@ static int __cam_isp_ctx_apply_req_offline(
 
 		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
 		ctx_isp->last_applied_req_id = prev_applied_req;
+		atomic_set(&ctx_isp->apply_in_progress, 0);
 
 		list_del_init(&req->list);
 		list_add(&req->list, &ctx->pending_req_list);
 
 		spin_unlock_bh(&ctx->lock);
 	} else {
+		atomic_set(&ctx_isp->apply_in_progress, 0);
 		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
 			CAM_ISP_CTX_ACTIVATED_APPLIED,
 			ctx_isp->last_applied_req_id);
@@ -1551,6 +1694,9 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
 	struct cam_context *ctx = ctx_isp->base;
 	struct cam_ctx_request  *req;
 	struct cam_isp_ctx_req  *req_isp;
+	struct cam_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_hw_cmd_args  isp_hw_cmd_args;
+	uint64_t last_cdm_done_req = 0;
 	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
 			(struct cam_isp_hw_epoch_event_data *)evt_data;
 
@@ -1561,6 +1707,82 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
 
 	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;
 
+	if (atomic_read(&ctx_isp->process_bubble)) {
+		if (list_empty(&ctx->active_req_list)) {
+			CAM_ERR(CAM_ISP,
+				"No available active req in bubble");
+			atomic_set(&ctx_isp->process_bubble, 0);
+			ctx_isp->bubble_frame_cnt = 0;
+			rc = -EINVAL;
+			return rc;
+		}
+
+		if (ctx_isp->last_sof_timestamp ==
+			ctx_isp->sof_timestamp_val) {
+			CAM_DBG(CAM_ISP,
+				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
+				ctx_isp->sof_timestamp_val);
+			goto notify_only;
+		}
+
+		req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+		if (ctx_isp->bubble_frame_cnt >= 1 &&
+			req_isp->bubble_detected) {
+			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+			isp_hw_cmd_args.cmd_type =
+				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
+			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+			rc = ctx->hw_mgr_intf->hw_cmd(
+				ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "HW command failed");
+				return rc;
+			}
+
+			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
+			CAM_DBG(CAM_ISP, "last_cdm_done req: %llu",
+				last_cdm_done_req);
+
+			if (last_cdm_done_req >= req->request_id) {
+				CAM_DBG(CAM_ISP,
+					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
+					req->request_id);
+				ctx_isp->bubble_frame_cnt = 0;
+			} else {
+				CAM_DBG(CAM_ISP,
+					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
+					req->request_id);
+				req_isp->num_acked = 0;
+				ctx_isp->bubble_frame_cnt = 0;
+				req_isp->bubble_detected = false;
+				req_isp->cdm_reset_before_apply = true;
+				list_del_init(&req->list);
+				list_add(&req->list, &ctx->pending_req_list);
+				atomic_set(&ctx_isp->process_bubble, 0);
+				ctx_isp->active_req_cnt--;
+				CAM_DBG(CAM_REQ,
+					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
+					req->request_id,
+					ctx_isp->active_req_cnt, ctx->ctx_id);
+			}
+		} else if (req_isp->bubble_detected) {
+			ctx_isp->bubble_frame_cnt++;
+			CAM_DBG(CAM_ISP,
+				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %u",
+				req->request_id,
+				ctx_isp->bubble_frame_cnt);
+		} else {
+			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
+				req->request_id);
+		}
+	}
+
+notify_only:
 	/*
 	 * notify reqmgr with sof signal. Note, due to scheduling delay
 	 * we can run into situation that two active requests has already
@@ -1611,7 +1833,7 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
 			ctx->ctx_id);
 		rc = -EFAULT;
 	}
-
+	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
 	return 0;
 }
 
@@ -1631,6 +1853,7 @@ static int __cam_isp_ctx_notify_eof_in_activated_state(
 		notify.dev_hdl = ctx->dev_hdl;
 		notify.frame_id = ctx_isp->frame_id;
 		notify.trigger = CAM_TRIGGER_POINT_EOF;
+		notify.trigger_id = ctx_isp->trigger_id;
 
 		ctx->ctx_crm_intf->notify_trigger(&notify);
 		CAM_DBG(CAM_ISP, "Notify CRM EOF frame %lld ctx %u",
@@ -1776,6 +1999,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
 	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
 	req_isp->bubble_detected = true;
 	req_isp->reapply = true;
+	req_isp->cdm_reset_before_apply = false;
 
 	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
 		ctx->ctx_id, req_isp->bubble_report, req->request_id);
@@ -1892,6 +2116,9 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
 		return -EINVAL;
 	}
 
+	if (atomic_read(&ctx_isp->apply_in_progress))
+		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");
+
 	ctx_isp->frame_id++;
 	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
 	ctx_isp->boot_timestamp = sof_event_data->boot_time;
@@ -1990,6 +2217,7 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
 	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
 		ctx->ctx_id, req_isp->bubble_report, req->request_id);
 	req_isp->reapply = true;
+	req_isp->cdm_reset_before_apply = false;
 
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
 		ctx->ctx_crm_intf->notify_err) {
@@ -2914,14 +3142,12 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
 	cfg.priv  = &req_isp->hw_update_data;
 	cfg.init_packet = 0;
 	cfg.reapply = req_isp->reapply;
+	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;
 
-	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv,
-		&cfg);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"ctx_id:%d ,Can not apply the configuration",
-			ctx->ctx_id);
-	} else {
+	atomic_set(&ctx_isp->apply_in_progress, 1);
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (!rc) {
 		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
 		ctx_isp->last_applied_req_id = apply->request_id;
@@ -2937,7 +3163,24 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
 			req->request_id);
 		__cam_isp_ctx_update_event_record(ctx_isp,
 			CAM_ISP_CTX_EVENT_APPLY, req);
+	} else if (rc == -EALREADY) {
+		spin_lock_bh(&ctx->lock);
+		req_isp->bubble_detected = true;
+		req_isp->cdm_reset_before_apply = false;
+		atomic_set(&ctx_isp->process_bubble, 1);
+		list_del_init(&req->list);
+		list_add(&req->list, &ctx->active_req_list);
+		ctx_isp->active_req_cnt++;
+		spin_unlock_bh(&ctx->lock);
+		CAM_DBG(CAM_REQ,
+			"move request %lld to active list(cnt = %d), ctx %u",
+			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d",
+			ctx->ctx_id, apply->request_id, rc);
 	}
+	atomic_set(&ctx_isp->apply_in_progress, 0);
 end:
 	return rc;
 }
@@ -3274,6 +3517,7 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
 			}
 		}
 		req_isp->reapply = false;
+		req_isp->cdm_reset_before_apply = false;
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->free_req_list);
 	}
@@ -3373,6 +3617,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 	}
 
 end:
+	ctx_isp->bubble_frame_cnt = 0;
 	atomic_set(&ctx_isp->process_bubble, 0);
 	atomic_set(&ctx_isp->rxd_epoch, 0);
 	return rc;
@@ -3638,6 +3883,7 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
 	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
 		ctx->ctx_id, req_isp->bubble_report, req->request_id);
 	req_isp->reapply = true;
+	req_isp->cdm_reset_before_apply = false;
 
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
 		ctx->ctx_crm_intf->notify_err) {
@@ -4250,7 +4496,9 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	req_isp->num_fence_map_out = cfg.num_out_map_entries;
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
 	req_isp->num_acked = 0;
+	req_isp->num_deferred_acks = 0;
 	req_isp->bubble_detected = false;
+	req_isp->cdm_reset_before_apply = false;
 	req_isp->hw_update_data.packet = packet;
 
 	for (i = 0; i < req_isp->num_fence_map_out; i++) {
@@ -5008,6 +5256,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
 	start_isp.hw_config.priv  = &req_isp->hw_update_data;
 	start_isp.hw_config.init_packet = 1;
 	start_isp.hw_config.reapply = 0;
+	start_isp.hw_config.cdm_reset_before_apply = false;
 
 	ctx_isp->last_applied_req_id = req->request_id;
 
@@ -5021,6 +5270,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
 	ctx_isp->frame_id = 0;
 	ctx_isp->active_req_cnt = 0;
 	ctx_isp->reported_req_id = 0;
+	ctx_isp->bubble_frame_cnt = 0;
 	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
 		CAM_ISP_CTX_ACTIVATED_APPLIED :
 		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
@@ -5200,6 +5450,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->last_applied_req_id = 0;
 	ctx_isp->req_info.last_bufdone_req_id = 0;
+	ctx_isp->bubble_frame_cnt = 0;
 	atomic_set(&ctx_isp->process_bubble, 0);
 	atomic_set(&ctx_isp->rxd_epoch, 0);
 	atomic64_set(&ctx_isp->state_monitor_head, -1);
@@ -5715,6 +5966,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
 	ctx->use_frame_header_ts = false;
 	ctx->active_req_cnt = 0;
 	ctx->reported_req_id = 0;
+	ctx->bubble_frame_cnt = 0;
 	ctx->req_info.last_bufdone_req_id = 0;
 
 	ctx->hw_ctx = NULL;

+ 33 - 15
drivers/cam_isp/cam_isp_context.h

@@ -138,21 +138,30 @@ struct cam_isp_ctx_irq_ops {
 /**
  * struct cam_isp_ctx_req - ISP context request object
  *
- * @base:                  Common request object ponter
- * @cfg:                   ISP hardware configuration array
- * @num_cfg:               Number of ISP hardware configuration entries
- * @fence_map_out:         Output fence mapping array
- * @num_fence_map_out:     Number of the output fence map
- * @fence_map_in:          Input fence mapping array
- * @num_fence_map_in:      Number of input fence map
- * @num_acked:             Count to track acked entried for output.
- *                         If count equals the number of fence out, it means
- *                         the request has been completed.
- * @bubble_report:         Flag to track if bubble report is active on
- *                         current request
- * @hw_update_data:        HW update data for this request
- * @event_timestamp:       Timestamp for different stage of request
- * @reapply:               True if reapplying after bubble
+ * @base:                      Common request object pointer
+ * @cfg:                       ISP hardware configuration array
+ * @num_cfg:                   Number of ISP hardware configuration entries
+ * @fence_map_out:             Output fence mapping array
+ * @num_fence_map_out:         Number of the output fence map
+ * @fence_map_in:              Input fence mapping array
+ * @num_fence_map_in:          Number of input fence map
+ * @num_acked:                 Count to track acked entries for output.
+ *                             If count equals the number of fence out, it means
+ *                             the request has been completed.
+ * @num_deferred_acks:         Number of buf_dones/acks that are deferred to
+ *                             handle or signalled in special scenarios.
+ *                             Increment this count instead of num_acked and
+ *                             handle the events later where eventually
+ *                             increment num_acked.
+ * @deferred_fence_map_index:  Saves the indices of fence_map_out for which
+ *                             handling of buf_done is deferred.
+ * @bubble_report:             Flag to track if bubble report is active on
+ *                             current request
+ * @hw_update_data:            HW update data for this request
+ * @event_timestamp:           Timestamp for different stage of request
+ * @reapply:                   True if reapplying after bubble
+ * @cdm_reset_before_apply:    For bubble re-apply when buf done not coming set
+ *                             to True
  *
  */
 struct cam_isp_ctx_req {
@@ -165,12 +174,15 @@ struct cam_isp_ctx_req {
 	struct cam_hw_fence_map_entry         fence_map_in[CAM_ISP_CTX_RES_MAX];
 	uint32_t                              num_fence_map_in;
 	uint32_t                              num_acked;
+	uint32_t                              num_deferred_acks;
+	uint32_t                  deferred_fence_map_index[CAM_ISP_CTX_RES_MAX];
 	int32_t                               bubble_report;
 	struct cam_isp_prepare_hw_update_data hw_update_data;
 	ktime_t                               event_timestamp
 		[CAM_ISP_CTX_EVENT_MAX];
 	bool                                  bubble_detected;
 	bool                                  reapply;
+	bool                                  cdm_reset_before_apply;
 };
 
 /**
@@ -242,6 +254,8 @@ struct cam_isp_context_event_record {
  * @subscribe_event:           The irq event mask that CRM subscribes to, IFE
  *                             will invoke CRM cb at those event.
  * @last_applied_req_id:       Last applied request id
+ * @last_sof_timestamp:        SOF timestamp of the last frame
+ * @bubble_frame_cnt:          Count of the frame after bubble
  * @state_monitor_head:        Write index to the state monitoring array
  * @req_info                   Request id information about last buf done
  * @cam_isp_ctx_state_monitor: State monitoring array
@@ -256,6 +270,7 @@ struct cam_isp_context_event_record {
  * @custom_enabled:            Custom HW enabled for this ctx
  * @use_frame_header_ts:       Use frame header for qtimer ts
  * @support_consumed_addr:     Indicate whether HW has last consumed addr reg
+ * @apply_in_progress:         Whether request apply is in progress
  * @init_timestamp:            Timestamp at which this context is initialized
  * @isp_device_type:           ISP device type
  * @rxd_epoch:                 Indicate whether epoch has been received. Used to
@@ -284,6 +299,8 @@ struct cam_isp_context {
 	int64_t                          reported_req_id;
 	uint32_t                         subscribe_event;
 	int64_t                          last_applied_req_id;
+	uint64_t                         last_sof_timestamp;
+	uint32_t                         bubble_frame_cnt;
 	atomic64_t                       state_monitor_head;
 	struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
 		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
@@ -300,6 +317,7 @@ struct cam_isp_context {
 	bool                                  custom_enabled;
 	bool                                  use_frame_header_ts;
 	bool                                  support_consumed_addr;
+	atomic_t                              apply_in_progress;
 	unsigned int                          init_timestamp;
 	uint32_t                              isp_device_type;
 	atomic_t                              rxd_epoch;

+ 86 - 40
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -1840,9 +1840,9 @@ static int cam_ife_mgr_acquire_cid_res(
 		}
 	}
 
-	/* Acquire Left if not already acquired */
-	/* For dual IFE cases, start acquiring the lower idx first */
-	if (ife_ctx->is_fe_enabled || in_port->usage_type ||
+	/* Acquire right if not already acquired */
+	/* For dual IFE cases, master will be lower idx */
+	if (ife_ctx->is_fe_enabled ||
 		ife_ctx->dsp_enabled)
 		rc = cam_ife_hw_mgr_acquire_csid_hw(ife_hw_mgr,
 			&csid_acquire, true);
@@ -1854,10 +1854,15 @@ static int cam_ife_mgr_acquire_cid_res(
 		CAM_ERR(CAM_ISP, "No %d paths available", path_res_id);
 		goto put_res;
 	}
-	cid_res_temp->hw_res[acquired_cnt++] = csid_acquire.node_res;
+
+	if (in_port->usage_type)
+		cid_res_temp->hw_res[++acquired_cnt] = csid_acquire.node_res;
+	else
+		cid_res_temp->hw_res[acquired_cnt++] = csid_acquire.node_res;
 
 acquire_successful:
-	CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d",
+	CAM_DBG(CAM_ISP, "CID %s acquired success is_dual %d",
+		(in_port->usage_type ? "Right" : " Left"),
 		in_port->usage_type);
 
 	cid_res_temp->res_type = CAM_ISP_RESOURCE_CID;
@@ -1865,16 +1870,13 @@ acquire_successful:
 	cid_res_temp->res_id = csid_acquire.node_res->res_id;
 	cid_res_temp->is_dual_isp = in_port->usage_type;
 	ife_ctx->is_dual = (bool)in_port->usage_type;
-	if (ife_ctx->is_dual)
-		ife_ctx->master_hw_idx =
-			cid_res_temp->hw_res[0]->hw_intf->hw_idx;
 	if (in_port->num_out_res)
 		cid_res_temp->is_secure = out_port->secure_mode;
 
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res);
 
 	/*
-	 * Acquire Right if not already acquired.
+	 * Acquire left if not already acquired.
 	 * Dual IFE for RDI and PPP is not currently supported.
 	 */
 	if (cid_res_temp->is_dual_isp && path_res_id
@@ -1891,11 +1893,11 @@ acquire_successful:
 				csid_acquire.phy_sel = CAM_ISP_IFE_IN_RES_PHY_1;
 		}
 
-		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+		for (j = CAM_IFE_CSID_HW_NUM_MAX - 1; j >= 0; j--) {
 			if (!ife_hw_mgr->csid_devices[j])
 				continue;
 
-			if (j == cid_res_temp->hw_res[0]->hw_intf->hw_idx)
+			if (j == cid_res_temp->hw_res[1]->hw_intf->hw_idx)
 				continue;
 
 			hw_intf = ife_hw_mgr->csid_devices[j];
@@ -1907,16 +1909,20 @@ acquire_successful:
 				break;
 		}
 
-		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+		if (j == -1 || !csid_acquire.node_res) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resource");
+				"Can not acquire ife csid dual resource");
 			goto end;
 		}
-		cid_res_temp->hw_res[1] = csid_acquire.node_res;
+		cid_res_temp->hw_res[0] = csid_acquire.node_res;
 		ife_ctx->slave_hw_idx =
 			cid_res_temp->hw_res[1]->hw_intf->hw_idx;
-		CAM_DBG(CAM_ISP, "CID right acquired success is_dual %d",
-			in_port->usage_type);
+		ife_ctx->master_hw_idx =
+			cid_res_temp->hw_res[0]->hw_intf->hw_idx;
+		CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d [master %u: slave %u]",
+			in_port->usage_type,
+			ife_ctx->master_hw_idx,
+			ife_ctx->slave_hw_idx);
 	}
 
 	return 0;
@@ -2831,6 +2837,7 @@ void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
 		complete_all(&ctx->config_done_complete);
 		reg_dump_done = atomic_read(&ctx->cdm_done);
 		atomic_set(&ctx->cdm_done, 1);
+		ctx->last_cdm_done_req = cookie;
 		if ((g_ife_hw_mgr.debug_cfg.per_req_reg_dump) &&
 			(!reg_dump_done))
 			cam_ife_mgr_handle_reg_dump(ctx,
@@ -3233,6 +3240,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	if (cdm_acquire.id == CAM_CDM_IFE)
 		ife_ctx->internal_cdm = true;
 	atomic_set(&ife_ctx->cdm_done, 1);
+	ife_ctx->last_cdm_done_req = 0;
 
 	acquire_args->support_consumed_addr =
 		g_ife_hw_mgr.support_consumed_addr;
@@ -3511,6 +3519,7 @@ static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
 	ife_ctx->cdm_handle = cdm_acquire.handle;
 	ife_ctx->cdm_id = cdm_acquire.id;
 	atomic_set(&ife_ctx->cdm_done, 1);
+	ife_ctx->last_cdm_done_req = 0;
 
 	acquire_args->ctxt_to_hw_map = ife_ctx;
 	ife_ctx->ctx_in_use = 1;
@@ -3847,6 +3856,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
 	struct cam_ife_hw_mgr_ctx *ctx;
 	struct cam_isp_prepare_hw_update_data *hw_update_data;
 	unsigned long rem_jiffies = 0;
+	bool cdm_hang_detect = false;
 
 	if (!hw_mgr_priv || !config_hw_args) {
 		CAM_ERR(CAM_ISP,
@@ -3882,6 +3892,31 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
 	CAM_DBG(CAM_ISP, "Ctx[%pK][%d] : Applying Req %lld, init_packet=%d",
 		ctx, ctx->ctx_index, cfg->request_id, cfg->init_packet);
 
+	if (cfg->reapply && cfg->cdm_reset_before_apply) {
+		if (ctx->last_cdm_done_req < cfg->request_id) {
+			cdm_hang_detect =
+				cam_cdm_detect_hang_error(ctx->cdm_handle);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CDM callback not received for req: %lld, last_cdm_done_req: %lld, cdm_hang_detect: %d",
+				cfg->request_id, ctx->last_cdm_done_req,
+				cdm_hang_detect);
+			rc = cam_cdm_reset_hw(ctx->cdm_handle);
+			if (rc) {
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"CDM reset unsuccessful for req: %lld. ctx: %d, rc: %d",
+					cfg->request_id, ctx->ctx_index, rc);
+				ctx->last_cdm_done_req = 0;
+				return rc;
+			}
+		} else {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CDM callback received, should wait for buf done for req: %lld",
+				cfg->request_id);
+			return -EALREADY;
+		}
+		ctx->last_cdm_done_req = 0;
+	}
+
 	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
 		if (hw_update_data->bw_config_valid[i] == true) {
 
@@ -3960,7 +3995,8 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
 			return rc;
 		}
 
-		if (cfg->init_packet) {
+		if (cfg->init_packet ||
+			(ctx->custom_config & CAM_IFE_CUSTOM_CFG_SW_SYNC_ON)) {
 			rem_jiffies = wait_for_completion_timeout(
 				&ctx->config_done_complete,
 				msecs_to_jiffies(60));
@@ -4779,6 +4815,7 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
 	ctx->is_fe_enabled = false;
 	ctx->is_offline = false;
 	ctx->pf_mid_found = false;
+	ctx->last_cdm_done_req = 0;
 	atomic_set(&ctx->overflow_pending, 0);
 	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
 		ctx->sof_cnt[i] = 0;
@@ -5962,7 +5999,8 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 
 		if (!prepare || !prepare->priv ||
 			(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
-			CAM_ERR(CAM_ISP, "Invalid inputs");
+			CAM_ERR(CAM_ISP, "Invalid inputs usage type %d",
+				bw_config->usage_type);
 			return -EINVAL;
 		}
 
@@ -5987,7 +6025,8 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 
 		bw_config = (struct cam_isp_bw_config_v2 *)blob_data;
 
-		if (bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES) {
+		if (bw_config->num_paths > CAM_ISP_MAX_PER_PATH_VOTES ||
+			!bw_config->num_paths) {
 			CAM_ERR(CAM_ISP, "Invalid num paths %d",
 				bw_config->num_paths);
 			return -EINVAL;
@@ -6022,7 +6061,8 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 
 		if (!prepare || !prepare->priv ||
 			(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
-			CAM_ERR(CAM_ISP, "Invalid inputs");
+			CAM_ERR(CAM_ISP, "Invalid inputs usage type %d",
+				bw_config->usage_type);
 			return -EINVAL;
 		}
 
@@ -7004,6 +7044,10 @@ static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
 				isp_hw_cmd_args->u.packet_op_code =
 				CAM_ISP_PACKET_UPDATE_DEV;
 			break;
+		case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE:
+			isp_hw_cmd_args->u.last_cdm_done =
+				ctx->last_cdm_done_req;
+			break;
 		default:
 			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
 				hw_cmd_args->cmd_type);
@@ -7589,19 +7633,21 @@ static int cam_ife_hw_mgr_handle_hw_dump_info(
 		}
 	}
 
-	out_port_id = event_info->res_id & 0xFF;
-	hw_mgr_res =
-		&ife_hw_mgr_ctx->res_list_ife_out[out_port_id];
-	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
-		if (!hw_mgr_res->hw_res[i])
-			continue;
-		hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
-		if (hw_intf->hw_ops.process_cmd) {
-			rc = hw_intf->hw_ops.process_cmd(
-				hw_intf->hw_priv,
-				CAM_ISP_HW_CMD_DUMP_BUS_INFO,
-				(void *)event_info,
-				sizeof(struct cam_isp_hw_event_info));
+	if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		out_port_id = event_info->res_id & 0xFF;
+		hw_mgr_res =
+			&ife_hw_mgr_ctx->res_list_ife_out[out_port_id];
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_DUMP_BUS_INFO,
+					(void *)event_info,
+					sizeof(struct cam_isp_hw_event_info));
+			}
 		}
 	}
 
@@ -7718,7 +7764,7 @@ static int cam_ife_hw_mgr_handle_hw_rup(
 		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
 		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+			CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&rup_event_data);
 		break;
 
 	case CAM_ISP_HW_VFE_IN_RDI0:
@@ -7730,7 +7776,7 @@ static int cam_ife_hw_mgr_handle_hw_rup(
 		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
 		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+			CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&rup_event_data);
 		break;
 
 	case CAM_ISP_HW_VFE_IN_PDLIB:
@@ -7768,7 +7814,7 @@ static int cam_ife_hw_mgr_handle_hw_epoch(
 
 		epoch_done_event_data.frame_id_meta = event_info->reg_val;
 		ife_hw_irq_epoch_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_EPOCH, &epoch_done_event_data);
+			CAM_ISP_HW_EVENT_EPOCH, (void *)&epoch_done_event_data);
 
 		break;
 
@@ -7836,7 +7882,7 @@ static int cam_ife_hw_mgr_handle_hw_sof(
 			break;
 
 		ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+			CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data);
 
 		break;
 
@@ -7852,7 +7898,7 @@ static int cam_ife_hw_mgr_handle_hw_sof(
 		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
 		ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+			CAM_ISP_HW_EVENT_SOF, (void *)&sof_done_event_data);
 		break;
 
 	case CAM_ISP_HW_VFE_IN_PDLIB:
@@ -7889,7 +7935,7 @@ static int cam_ife_hw_mgr_handle_hw_eof(
 			break;
 
 		ife_hw_irq_eof_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_EOF, &eof_done_event_data);
+			CAM_ISP_HW_EVENT_EOF, (void *)&eof_done_event_data);
 
 		break;
 
@@ -7936,7 +7982,7 @@ static int cam_ife_hw_mgr_handle_hw_buf_done(
 	if (buf_done_event_data.num_handles > 0 && ife_hwr_irq_wm_done_cb) {
 		CAM_DBG(CAM_ISP, "Notify ISP context");
 		ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv,
-			CAM_ISP_HW_EVENT_DONE, &buf_done_event_data);
+			CAM_ISP_HW_EVENT_DONE, (void *)&buf_done_event_data);
 	}
 
 	CAM_DBG(CAM_ISP,

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h

@@ -85,6 +85,7 @@ struct cam_ife_hw_mgr_debug {
  *                          context
  * @cdm_done                flag to indicate cdm has finished writing shadow
  *                          registers
+ * @last_cdm_done_req:      Last cdm done request
  * @is_rdi_only_context     flag to specify the context has only rdi resource
  * @config_done_complete    indicator for configuration complete
  * @reg_dump_buf_desc:      cmd buffer descriptors for reg dump
@@ -138,6 +139,7 @@ struct cam_ife_hw_mgr_ctx {
 	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
 	atomic_t                        overflow_pending;
 	atomic_t                        cdm_done;
+	uint64_t                        last_cdm_done_req;
 	uint32_t                        is_rdi_only_context;
 	struct completion               config_done_complete;
 	uint32_t                        hw_version;

+ 61 - 17
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -1774,6 +1774,7 @@ void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata,
 		(struct cam_tfe_hw_mgr_ctx *)hw_update_data->isp_mgr_ctx;
 		complete_all(&ctx->config_done_complete);
 		atomic_set(&ctx->cdm_done, 1);
+		ctx->last_cdm_done_req = cookie;
 		if (g_tfe_hw_mgr.debug_cfg.per_req_reg_dump)
 			cam_tfe_mgr_handle_reg_dump(ctx,
 				hw_update_data->reg_dump_buf_desc,
@@ -1891,6 +1892,7 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	tfe_ctx->cdm_handle = cdm_acquire.handle;
 	tfe_ctx->cdm_ops = cdm_acquire.ops;
 	atomic_set(&tfe_ctx->cdm_done, 1);
+	tfe_ctx->last_cdm_done_req = 0;
 
 	acquire_hw_info = (struct cam_isp_tfe_acquire_hw_info *)
 		acquire_args->acquire_info;
@@ -2127,6 +2129,7 @@ static int cam_tfe_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
 	tfe_ctx->cdm_handle = cdm_acquire.handle;
 	tfe_ctx->cdm_ops = cdm_acquire.ops;
 	atomic_set(&tfe_ctx->cdm_done, 1);
+	tfe_ctx->last_cdm_done_req = 0;
 
 	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
 
@@ -2440,6 +2443,7 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
 	struct cam_cdm_bl_request *cdm_cmd;
 	struct cam_tfe_hw_mgr_ctx *ctx;
 	struct cam_isp_prepare_hw_update_data *hw_update_data;
+	bool cdm_hang_detect = false;
 
 	if (!hw_mgr_priv || !config_hw_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -2463,6 +2467,31 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
 	hw_update_data = (struct cam_isp_prepare_hw_update_data  *) cfg->priv;
 	hw_update_data->isp_mgr_ctx = ctx;
 
+	if (cfg->reapply && cfg->cdm_reset_before_apply) {
+		if (ctx->last_cdm_done_req < cfg->request_id) {
+			cdm_hang_detect =
+				cam_cdm_detect_hang_error(ctx->cdm_handle);
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CDM callback not received for req: %lld, last_cdm_done_req: %lld, cdm_hang_detect: %d",
+				cfg->request_id, ctx->last_cdm_done_req,
+				cdm_hang_detect);
+			rc = cam_cdm_reset_hw(ctx->cdm_handle);
+			if (rc) {
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"CDM reset unsuccessful for req: %lld, ctx: %d, rc: %d",
+					cfg->request_id, ctx->ctx_index, rc);
+				ctx->last_cdm_done_req = 0;
+				return rc;
+			}
+		} else {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CDM callback received, should wait for buf done for req: %lld",
+				cfg->request_id);
+			return -EALREADY;
+		}
+		ctx->last_cdm_done_req = 0;
+	}
+
 	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
 		if (hw_update_data->bw_config_valid[i] == true) {
 
@@ -2981,11 +3010,12 @@ static int cam_tfe_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
 	struct cam_isp_stop_args          stop_isp;
 	struct cam_tfe_hw_mgr_ctx        *ctx;
 	struct cam_isp_hw_mgr_res        *hw_mgr_res;
-	struct cam_isp_resource_node     *rsrc_node = NULL;
-	uint32_t                          i, camif_debug;
+	struct cam_hw_intf               *hw_intf;
+	uint32_t                          i;
 	bool                              res_rdi_context_set = false;
 	uint32_t                          primary_rdi_in_res;
 	uint32_t                          primary_rdi_out_res;
+	bool                              hw_id[CAM_TFE_HW_NUM_MAX] = {0};
 
 	primary_rdi_in_res = CAM_ISP_HW_TFE_IN_MAX;
 	primary_rdi_out_res = CAM_ISP_TFE_OUT_RES_MAX;
@@ -3018,31 +3048,40 @@ static int cam_tfe_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
 	if (ctx->init_done && start_isp->start_only)
 		goto start_only;
 
-	/* set current csid debug information to CSID HW */
-	for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
-		if (g_tfe_hw_mgr.csid_devices[i])
-			rc = g_tfe_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
-				g_tfe_hw_mgr.csid_devices[i]->hw_priv,
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_id[hw_intf->hw_idx])
+				continue;
+
+			rc = hw_intf->hw_ops.process_cmd(
+				hw_intf->hw_priv,
 				CAM_TFE_CSID_SET_CSID_DEBUG,
 				&g_tfe_hw_mgr.debug_cfg.csid_debug,
 				sizeof(g_tfe_hw_mgr.debug_cfg.csid_debug));
+			hw_id[hw_intf->hw_idx] = true;
+		}
 	}
 
-	camif_debug = g_tfe_hw_mgr.debug_cfg.camif_debug;
+	memset(&hw_id[0], 0, sizeof(hw_id));
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
 		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
 			if (!hw_mgr_res->hw_res[i])
 				continue;
 
-			rsrc_node = hw_mgr_res->hw_res[i];
-			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
-				CAM_ISP_HW_TFE_IN_CAMIF)) {
-				rc = hw_mgr_res->hw_res[i]->process_cmd(
-					hw_mgr_res->hw_res[i],
-					CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
-					&camif_debug,
-					sizeof(camif_debug));
-			}
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_id[hw_intf->hw_idx])
+				continue;
+
+			rc = hw_intf->hw_ops.process_cmd(
+				hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+				&g_tfe_hw_mgr.debug_cfg.camif_debug,
+				sizeof(g_tfe_hw_mgr.debug_cfg.camif_debug));
+			hw_id[hw_intf->hw_idx] = true;
 		}
 	}
 
@@ -3426,6 +3465,7 @@ static int cam_tfe_mgr_release_hw(void *hw_mgr_priv,
 	ctx->is_tpg  = false;
 	ctx->num_reg_dump_buf = 0;
 	ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX;
+	ctx->last_cdm_done_req = 0;
 	atomic_set(&ctx->overflow_pending, 0);
 
 	for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
@@ -4718,6 +4758,10 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
 				isp_hw_cmd_args->u.packet_op_code =
 				CAM_ISP_TFE_PACKET_CONFIG_DEV;
 			break;
+		case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE:
+			isp_hw_cmd_args->u.last_cdm_done =
+				ctx->last_cdm_done_req;
+			break;
 		default:
 			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
 				hw_cmd_args->cmd_type);

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h

@@ -70,6 +70,7 @@ struct cam_tfe_hw_mgr_debug {
  *                            context
  * @cdm_done                  flag to indicate cdm has finished writing shadow
  *                            registers
+ * @last_cdm_done_req:        Last CDM done request
  * @is_rdi_only_context       flag to specify the context has only rdi resource
  * @reg_dump_buf_desc:        cmd buffer descriptors for reg dump
  * @num_reg_dump_buf:         count of descriptors in reg_dump_buf_desc
@@ -112,6 +113,7 @@ struct cam_tfe_hw_mgr_ctx {
 
 	atomic_t                        overflow_pending;
 	atomic_t                        cdm_done;
+	uint64_t                        last_cdm_done_req;
 	uint32_t                        is_rdi_only_context;
 	struct cam_cmd_buf_desc         reg_dump_buf_desc[
 						CAM_REG_DUMP_MAX_BUF_ENTRIES];

+ 6 - 1
drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c

@@ -476,7 +476,7 @@ int cam_isp_add_io_buffers(
 	struct cam_isp_hw_get_cmd_update    update_buf;
 	struct cam_isp_hw_get_wm_update     wm_update;
 	struct cam_isp_hw_get_wm_update     bus_rd_update;
-	struct cam_hw_fence_map_entry      *out_map_entries;
+	struct cam_hw_fence_map_entry      *out_map_entries = NULL;
 	struct cam_hw_fence_map_entry      *in_map_entries;
 	struct cam_isp_hw_get_cmd_update    secure_mode;
 	uint32_t                            kmd_buf_remain_size;
@@ -738,6 +738,11 @@ int cam_isp_add_io_buffers(
 			}
 			io_cfg_used_bytes += update_buf.cmd.used_bytes;
 
+			if (!out_map_entries) {
+				CAM_ERR(CAM_ISP, "out_map_entries is NULL");
+				rc = -EINVAL;
+				return rc;
+			}
 
 			image_buf_addr =
 				out_map_entries->image_buf_addr;

+ 8 - 5
drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h

@@ -234,6 +234,7 @@ enum cam_isp_hw_mgr_command {
 	CAM_ISP_HW_MGR_CMD_SOF_DEBUG,
 	CAM_ISP_HW_MGR_CMD_CTX_TYPE,
 	CAM_ISP_HW_MGR_GET_PACKET_OPCODE,
+	CAM_ISP_HW_MGR_GET_LAST_CDM_DONE,
 	CAM_ISP_HW_MGR_CMD_MAX,
 };
 
@@ -247,11 +248,12 @@ enum cam_isp_ctx_type {
 /**
  * struct cam_isp_hw_cmd_args - Payload for hw manager command
  *
- * @cmd_type               HW command type
- * @cmd_data               command data
- * @sof_irq_enable         To debug if SOF irq is enabled
- * @ctx_type               RDI_ONLY, PIX and RDI, or FS2
- * @packet_op_code         packet opcode
+ * @cmd_type:              HW command type
+ * @cmd_data:              Command data
+ * @sof_irq_enable:        To debug if SOF irq is enabled
+ * @ctx_type:              RDI_ONLY, PIX and RDI, or FS2
+ * @packet_op_code:        Packet opcode
+ * @last_cdm_done:         Last cdm done request
  */
 struct cam_isp_hw_cmd_args {
 	uint32_t                          cmd_type;
@@ -260,6 +262,7 @@ struct cam_isp_hw_cmd_args {
 		uint32_t                      sof_irq_enable;
 		uint32_t                      ctx_type;
 		uint32_t                      packet_op_code;
+		uint64_t                      last_cdm_done;
 	} u;
 };
 

+ 110 - 61
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c

@@ -1143,6 +1143,7 @@ int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 	struct cam_ife_csid_path_cfg    *path_data;
 	struct cam_isp_resource_node    *res;
 	bool                             is_rdi = false;
+	uint32_t                         width = 0;
 
 	/* CSID  CSI2 v2.0 supports 31 vc */
 	if (reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX) {
@@ -1313,10 +1314,13 @@ int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 	}
 
 	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		width = reserve->in_port->left_stop -
+			reserve->in_port->left_start + 1;
+		if (path_data->horizontal_bin || path_data->qcfa_bin)
+			width /= 2;
 		if ((reserve->res_id == CAM_IFE_PIX_PATH_RES_IPP) &&
 			!(cam_ife_csid_is_resolution_supported(csid_hw,
-			reserve->in_port->left_stop -
-			reserve->in_port->left_start + 1))) {
+			width))) {
 			rc = -EINVAL;
 			goto end;
 		}
@@ -1339,10 +1343,13 @@ int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 			csid_hw->hw_intf->hw_idx, reserve->res_id,
 			path_data->start_line, path_data->end_line);
 	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		width = reserve->in_port->right_stop -
+			reserve->in_port->right_start + 1;
+		if (path_data->horizontal_bin || path_data->qcfa_bin)
+			width /= 2;
 		if ((reserve->res_id == CAM_IFE_PIX_PATH_RES_IPP) &&
 			!(cam_ife_csid_is_resolution_supported(csid_hw,
-			reserve->in_port->right_stop -
-			reserve->in_port->right_start + 1))) {
+			width))) {
 			rc = -EINVAL;
 			goto end;
 		}
@@ -1362,10 +1369,13 @@ int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 			csid_hw->hw_intf->hw_idx, reserve->res_id,
 			path_data->start_line, path_data->end_line);
 	} else {
+		width = reserve->in_port->left_stop -
+			reserve->in_port->left_start + 1;
+		if (path_data->horizontal_bin || path_data->qcfa_bin)
+			width /= 2;
 		if ((reserve->res_id == CAM_IFE_PIX_PATH_RES_IPP) &&
 			!(cam_ife_csid_is_resolution_supported(csid_hw,
-			reserve->in_port->left_stop -
-			reserve->in_port->left_start + 1))) {
+			width))) {
 			rc = -EINVAL;
 			goto end;
 		}
@@ -1910,9 +1920,6 @@ static void cam_ife_csid_halt_csi2(
 		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
-	cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
-			CAM_SUBDEV_MESSAGE_IRQ_ERR,
-			csid_hw->csi2_rx_cfg.phy_sel);
 }
 
 static int cam_ife_csid_init_config_pxl_path(
@@ -4199,7 +4206,8 @@ static int cam_ife_csid_sof_irq_debug(
 	if (csid_hw->hw_info->hw_state ==
 		CAM_HW_STATE_POWER_DOWN) {
 		CAM_WARN(CAM_ISP,
-			"CSID powered down unable to %s sof irq",
+			"CSID:%d powered down unable to %s sof irq",
+			 csid_hw->hw_intf->hw_idx,
 			(sof_irq_enable == true) ? "enable" : "disable");
 		return 0;
 	}
@@ -4258,9 +4266,10 @@ static int cam_ife_csid_sof_irq_debug(
 		csid_hw->sof_irq_triggered = false;
 	}
 
-	CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s, CSID HW:%d",
-		(sof_irq_enable) ? "enabled" : "disabled",
-		csid_hw->hw_intf->hw_idx);
+	if (!in_irq())
+		CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s, CSID HW:%d",
+			(sof_irq_enable) ? "enabled" : "disabled",
+			csid_hw->hw_intf->hw_idx);
 
 	return 0;
 }
@@ -4601,35 +4610,6 @@ static int cam_csid_put_evt_payload(
 
 	return 0;
 }
-static char *cam_csid_status_to_str(uint32_t status)
-{
-	switch (status) {
-	case CAM_IFE_CSID_IRQ_REG_TOP:
-		return "TOP";
-	case CAM_IFE_CSID_IRQ_REG_RX:
-		return "RX";
-	case CAM_IFE_CSID_IRQ_REG_IPP:
-		return "IPP";
-	case CAM_IFE_CSID_IRQ_REG_PPP:
-		return "PPP";
-	case CAM_IFE_CSID_IRQ_REG_RDI_0:
-		return "RDI0";
-	case CAM_IFE_CSID_IRQ_REG_RDI_1:
-		return "RDI1";
-	case CAM_IFE_CSID_IRQ_REG_RDI_2:
-		return "RDI2";
-	case CAM_IFE_CSID_IRQ_REG_RDI_3:
-		return "RDI3";
-	case CAM_IFE_CSID_IRQ_REG_UDI_0:
-		return "UDI0";
-	case CAM_IFE_CSID_IRQ_REG_UDI_1:
-		return "UDI1";
-	case CAM_IFE_CSID_IRQ_REG_UDI_2:
-		return "UDI2";
-	default:
-		return "Invalid IRQ";
-	}
-}
 
 static int cam_csid_evt_bottom_half_handler(
 	void *handler_priv,
@@ -4640,6 +4620,8 @@ static int cam_csid_evt_bottom_half_handler(
 	int i;
 	int rc = 0;
 	struct cam_isp_hw_event_info event_info;
+	const struct cam_ife_csid_reg_offset    *csid_reg;
+	int udi_start_idx = CAM_IFE_CSID_IRQ_REG_UDI_0;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP,
@@ -4650,6 +4632,7 @@ static int cam_csid_evt_bottom_half_handler(
 
 	csid_hw = (struct cam_ife_csid_hw *)handler_priv;
 	evt_payload = (struct cam_csid_evt_payload *)evt_payload_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
 
 	if (!csid_hw->event_cb || !csid_hw->priv) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
@@ -4669,16 +4652,60 @@ static int cam_csid_evt_bottom_half_handler(
 		goto end;
 	}
 
-	CAM_ERR_RATE_LIMIT(CAM_ISP, "idx %d err %d phy %d cnt %d",
-		csid_hw->hw_intf->hw_idx,
-		evt_payload->evt_type,
-		csid_hw->csi2_rx_cfg.phy_sel,
-		csid_hw->csi2_cfg_cnt);
+	if (csid_hw->sof_irq_triggered && (evt_payload->evt_type ==
+		CAM_ISP_HW_ERROR_NONE)) {
+		if (evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
+			CSID_PATH_INFO_INPUT_SOF) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+		}
 
-	for (i = 0; i < CAM_IFE_CSID_IRQ_REG_MAX; i++)
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "status %s: %x",
-			cam_csid_status_to_str(i),
-			evt_payload->irq_status[i]);
+		if (evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
+			CSID_PATH_INFO_INPUT_SOF) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+		}
+
+		for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+			if (evt_payload->irq_status[i] &
+				CSID_PATH_INFO_INPUT_SOF)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+					"CSID:%d RDI:%d SOF received",
+					csid_hw->hw_intf->hw_idx, i);
+		}
+
+		for (i = 0; i < csid_reg->cmn_reg->num_udis; i++) {
+			if (evt_payload->irq_status[udi_start_idx + i] &
+				CSID_PATH_INFO_INPUT_SOF)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+					"CSID:%d UDI:%d SOF received",
+					csid_hw->hw_intf->hw_idx, i);
+		}
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID %d err %d phy %d irq status TOP: 0x%x RX: 0x%x IPP: 0x%x PPP: 0x%x RDI0: 0x%x RDI1: 0x%x RDI2: 0x%x RDI3: 0x%x UDI0:  0x%x  UDI1:  0x%x  UDI2:  0x%x",
+			csid_hw->hw_intf->hw_idx,
+			evt_payload->evt_type,
+			csid_hw->csi2_rx_cfg.phy_sel,
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_TOP],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RX],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_IPP],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_PPP],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RDI_0],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RDI_1],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RDI_2],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_RDI_3],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_UDI_1],
+			evt_payload->irq_status[CAM_IFE_CSID_IRQ_REG_UDI_2]);
+	}
+
+	if (evt_payload->evt_type == CAM_ISP_HW_ERROR_CSID_FATAL)
+		cam_subdev_notify_message(CAM_CSIPHY_DEVICE_TYPE,
+				CAM_SUBDEV_MESSAGE_IRQ_ERR,
+				csid_hw->csi2_rx_cfg.phy_sel);
 
 	/* this hunk can be extended to handle more cases
 	 * which we want to offload to bottom half from
@@ -4697,7 +4724,7 @@ static int cam_csid_evt_bottom_half_handler(
 		break;
 
 	default:
-		CAM_DBG(CAM_ISP, "CSID[%d] invalid error type %d",
+		CAM_DBG(CAM_ISP, "CSID[%d] error type %d",
 			csid_hw->hw_intf->hw_idx,
 			evt_payload->evt_type);
 		break;
@@ -4763,7 +4790,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 	uint32_t irq_status[CAM_IFE_CSID_IRQ_REG_MAX] = {0};
 	uint32_t i, val, val2;
 	bool fatal_err_detected = false;
-	uint32_t sof_irq_debug_en = 0;
+	uint32_t sof_irq_debug_en = 0, log_en = 0;
 	unsigned long flags;
 
 	csid_hw = (struct cam_ife_csid_hw *)data;
@@ -5129,8 +5156,13 @@ handle_fatal_error:
 		if ((irq_status[CAM_IFE_CSID_IRQ_REG_IPP] &
 			CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
-			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
-				csid_hw->hw_intf->hw_idx);
+			if (!csid_hw->sof_irq_triggered)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP SOF received",
+					csid_hw->hw_intf->hw_idx);
+			else
+				log_en = 1;
+
 			if (csid_hw->sof_irq_triggered)
 				csid_hw->irq_debug_cnt++;
 		}
@@ -5201,8 +5233,13 @@ handle_fatal_error:
 		if ((irq_status[CAM_IFE_CSID_IRQ_REG_PPP] &
 			CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
-			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received",
-				csid_hw->hw_intf->hw_idx);
+			if (!csid_hw->sof_irq_triggered)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP SOF received",
+					csid_hw->hw_intf->hw_idx);
+			else
+				log_en = 1;
+
 			if (csid_hw->sof_irq_triggered)
 				csid_hw->irq_debug_cnt++;
 		}
@@ -5270,9 +5307,13 @@ handle_fatal_error:
 
 		if ((irq_status[i] & CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
-			CAM_INFO_RATE_LIMIT(CAM_ISP,
+			if (!csid_hw->sof_irq_triggered)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
 				"CSID:%d RDI:%d SOF received",
-				csid_hw->hw_intf->hw_idx, i);
+					csid_hw->hw_intf->hw_idx, i);
+			else
+				log_en = 1;
+
 			if (csid_hw->sof_irq_triggered)
 				csid_hw->irq_debug_cnt++;
 		}
@@ -5340,9 +5381,13 @@ handle_fatal_error:
 		if ((irq_status[CAM_IFE_CSID_IRQ_REG_UDI_0 + i] &
 			CSID_PATH_INFO_INPUT_SOF) &&
 			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
-			CAM_INFO_RATE_LIMIT(CAM_ISP,
+			if (!csid_hw->sof_irq_triggered)
+				CAM_INFO_RATE_LIMIT(CAM_ISP,
 				"CSID:%d UDI:%d SOF received",
-				csid_hw->hw_intf->hw_idx, i);
+					csid_hw->hw_intf->hw_idx, i);
+			else
+				log_en = 1;
+
 			if (csid_hw->sof_irq_triggered)
 				csid_hw->irq_debug_cnt++;
 		}
@@ -5378,6 +5423,10 @@ handle_fatal_error:
 		}
 	}
 
+	if (log_en)
+		cam_csid_handle_hw_err_irq(csid_hw,
+			CAM_ISP_HW_ERROR_NONE, irq_status);
+
 	if (csid_hw->irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
 		cam_ife_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en);
 		csid_hw->irq_debug_cnt = 0;

+ 3 - 2
drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_rd.c

@@ -774,7 +774,7 @@ static int cam_sfe_bus_release_bus_rd(void *bus_priv, void *release_args,
 static int cam_sfe_bus_start_bus_rd(
 	void *hw_priv, void *stop_hw_args, uint32_t arg_size)
 {
-	int rc, i;
+	int rc = -ENODEV, i;
 	struct cam_isp_resource_node *sfe_bus_rd = NULL;
 	struct cam_sfe_bus_rd_data *rsrc_data = NULL;
 	struct cam_sfe_bus_rd_common_data *common_data = NULL;
@@ -811,8 +811,9 @@ static int cam_sfe_bus_start_bus_rd(
 		rc = cam_sfe_bus_start_rm(rsrc_data->rm_res[i]);
 
 	/* TO DO Subscribe mask for buf_done */
+	if (!rc)
+		sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
-	sfe_bus_rd->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
 }
 

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_bus/cam_sfe_bus_wr.c

@@ -937,6 +937,7 @@ static int cam_sfe_bus_acquire_sfe_out(void *priv, void *acquire_args,
 		return -EINVAL;
 	}
 
+	comp_grp_id = CAM_SFE_BUS_WR_COMP_GRP_MAX;
 	out_acquire_args = &acq_args->sfe_out;
 	format = out_acquire_args->out_port_info->format;
 

+ 2 - 2
drivers/cam_isp/isp_hw_mgr/isp_hw/sfe_hw/sfe_top/cam_sfe_top.c

@@ -866,7 +866,7 @@ static int cam_sfe_top_handle_irq_bottom_half(
 {
 	int i;
 	uint32_t irq_status[CAM_SFE_IRQ_REGISTERS_MAX] = {0};
-	enum cam_sfe_hw_irq_status          ret;
+	enum cam_sfe_hw_irq_status          ret = CAM_SFE_IRQ_STATUS_MAX;
 	struct cam_isp_hw_event_info        evt_info;
 	struct cam_isp_resource_node       *res = handler_priv;
 	struct cam_sfe_path_data           *path_data = res->res_priv;
@@ -1126,7 +1126,7 @@ int cam_sfe_top_init(
 	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
 		CAM_SFE_DELAY_BW_REDUCTION_NUM_FRAMES);
 	memset(&top_priv->core_cfg, 0x0,
-		sizeof(struct cam_sfe_core_config_args));
+		sizeof(struct cam_sfe_core_cfg));
 
 	CAM_DBG(CAM_SFE,
 		"Initializing SFE [%u] top with hw_version: 0x%x",

+ 10 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe530.h

@@ -40,6 +40,9 @@ static struct cam_tfe_top_reg_offset_common  tfe530_top_commong_reg  = {
 	.perf_stall_count             = 0x000010EC,
 	.perf_always_count            = 0x000010F0,
 	.perf_count_status            = 0x000010F4,
+	.diag_min_hbi_error_shift     = 15,
+	.diag_neq_hbi_shift           = 14,
+	.diag_sensor_hbi_mask         = 0x3FFF,
 };
 
 static struct cam_tfe_camif_reg  tfe530_camif_reg = {
@@ -131,6 +134,8 @@ static struct cam_tfe_rdi_reg_data tfe530_rdi0_reg_data = {
 		0x00000000,
 	},
 	.enable_diagnostic_hw        = 0x1,
+	.diag_sensor_sel             = 0x1,
+	.diag_sensor_shift           = 0x1,
 };
 
 static struct cam_tfe_rdi_reg  tfe530_rdi1_reg = {
@@ -166,6 +171,8 @@ static struct cam_tfe_rdi_reg_data tfe530_rdi1_reg_data = {
 		0x00000000,
 	},
 	.enable_diagnostic_hw        = 0x1,
+	.diag_sensor_sel             = 0x2,
+	.diag_sensor_shift           = 0x1,
 };
 
 static struct cam_tfe_rdi_reg  tfe530_rdi2_reg = {
@@ -201,6 +208,9 @@ static struct cam_tfe_rdi_reg_data tfe530_rdi2_reg_data = {
 		0x00000000,
 	},
 	.enable_diagnostic_hw        = 0x1,
+	.diag_sensor_sel             = 0x3,
+	.diag_sensor_shift           = 0x1,
+
 };
 
 static struct cam_tfe_clc_hw_status  tfe530_clc_hw_info[CAM_TFE_MAX_CLC] = {

+ 87 - 15
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c

@@ -52,6 +52,7 @@ struct cam_tfe_top_priv {
 	struct timeval                    epoch_ts;
 	struct timeval                    eof_ts;
 	struct timeval                    error_ts;
+	uint32_t                          top_debug;
 };
 
 struct cam_tfe_camif_data {
@@ -497,9 +498,11 @@ static int cam_tfe_rdi_irq_bottom_half(
 	bool                                  epoch_process,
 	struct cam_tfe_irq_evt_payload       *evt_payload)
 {
-	struct cam_tfe_rdi_data             *rdi_priv;
-	struct cam_isp_hw_event_info         evt_info;
-	struct cam_hw_info                  *hw_info;
+	struct cam_tfe_rdi_data               *rdi_priv;
+	struct cam_isp_hw_event_info           evt_info;
+	struct cam_hw_info                    *hw_info;
+	struct cam_tfe_top_reg_offset_common  *common_reg;
+	uint32_t                               val, val2;
 
 	rdi_priv = (struct cam_tfe_rdi_data    *)rdi_node->res_priv;
 	hw_info = rdi_node->hw_intf->hw_priv;
@@ -532,6 +535,23 @@ static int cam_tfe_rdi_irq_bottom_half(
 		if (rdi_priv->event_cb)
 			rdi_priv->event_cb(rdi_priv->priv,
 				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		if (top_priv->top_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			common_reg  = rdi_priv->common_reg;
+			val = cam_io_r(rdi_priv->mem_base +
+				common_reg->diag_sensor_status_0);
+			val2 =  cam_io_r(rdi_priv->mem_base +
+				common_reg->diag_sensor_status_1);
+			CAM_INFO(CAM_ISP,
+				"TFE:%d diag sensor hbi min error:%d neq hbi:%d HBI:%d VBI:%d",
+				rdi_node->hw_intf->hw_idx,
+				((val >> common_reg->diag_min_hbi_error_shift)
+					& 0x1),
+				((val >> common_reg->diag_neq_hbi_shift) & 0x1),
+				(val & common_reg->diag_sensor_hbi_mask),
+				val2);
+		}
 	}
 
 	if (epoch_process && (evt_payload->irq_reg_val[1] &
@@ -556,10 +576,11 @@ static int cam_tfe_camif_irq_bottom_half(
 	bool                                  epoch_process,
 	struct cam_tfe_irq_evt_payload       *evt_payload)
 {
-	struct cam_tfe_camif_data            *camif_priv;
-	struct cam_isp_hw_event_info          evt_info;
-	struct cam_hw_info                   *hw_info;
-	uint32_t                              val;
+	struct cam_tfe_camif_data             *camif_priv;
+	struct cam_isp_hw_event_info           evt_info;
+	struct cam_hw_info                    *hw_info;
+	struct cam_tfe_top_reg_offset_common  *common_reg;
+	uint32_t                              val, val2;
 
 	camif_priv = camif_node->res_priv;
 	hw_info = camif_node->hw_intf->hw_priv;
@@ -606,6 +627,23 @@ static int cam_tfe_camif_irq_bottom_half(
 		if (camif_priv->event_cb)
 			camif_priv->event_cb(camif_priv->priv,
 				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		if (top_priv->top_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			common_reg  = camif_priv->common_reg;
+			val = cam_io_r(camif_priv->mem_base +
+				common_reg->diag_sensor_status_0);
+			val2 =  cam_io_r(camif_priv->mem_base +
+				common_reg->diag_sensor_status_1);
+			CAM_INFO(CAM_ISP,
+				"TFE:%d diag sensor hbi min error:%d neq hbi:%d HBI:%d VBI:%d",
+				camif_node->hw_intf->hw_idx,
+				((val >> common_reg->diag_min_hbi_error_shift)
+					& 0x1),
+				((val >> common_reg->diag_neq_hbi_shift) & 0x1),
+				(val & common_reg->diag_sensor_hbi_mask),
+				val2);
+		}
 	}
 
 	if (epoch_process  && (evt_payload->irq_reg_val[1] &
@@ -622,13 +660,6 @@ static int cam_tfe_camif_irq_bottom_half(
 				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
 	}
 
-	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
-		val = cam_io_r(camif_priv->mem_base +
-			camif_priv->common_reg->diag_sensor_status_0);
-		CAM_DBG(CAM_ISP, "TFE_DIAG_SENSOR_STATUS: 0x%x",
-			camif_priv->mem_base, val);
-	}
-
 	return 0;
 }
 
@@ -1770,6 +1801,29 @@ static int cam_tfe_camif_irq_reg_dump(
 	return rc;
 }
 
+int cam_tfe_set_top_debug(struct cam_tfe_hw_core_info    *core_info,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_tfe_top_priv              *top_priv;
+	uint32_t                             *debug_val;
+
+	if (!cmd_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_tfe_top_priv  *)core_info->top_priv;
+	debug_val = (uint32_t  *)cmd_args;
+	top_priv->top_debug =  *debug_val;
+
+	CAM_DBG(CAM_ISP, "TFE:%d top debug set:%d",
+		core_info->core_index,
+		top_priv->top_debug);
+
+	return 0;
+}
+
+
 int cam_tfe_top_reserve(void *device_priv,
 	void *reserve_args, uint32_t arg_size)
 {
@@ -1910,6 +1964,7 @@ static int cam_tfe_camif_resource_start(
 {
 	struct cam_tfe_camif_data           *rsrc_data;
 	struct cam_tfe_soc_private          *soc_private;
+	struct cam_tfe_top_priv             *top_priv;
 	uint32_t                             val = 0;
 	uint32_t                             epoch0_irq_mask;
 	uint32_t                             epoch1_irq_mask;
@@ -1929,6 +1984,7 @@ static int cam_tfe_camif_resource_start(
 
 	rsrc_data = (struct cam_tfe_camif_data  *)camif_res->res_priv;
 	soc_private = rsrc_data->soc_info->soc_private;
+	top_priv = (struct cam_tfe_top_priv  *)core_info->top_priv;
 
 	if (!soc_private) {
 		CAM_ERR(CAM_ISP, "TFE:%d Error soc_private NULL",
@@ -2007,7 +2063,7 @@ static int cam_tfe_camif_resource_start(
 	rsrc_data->enable_sof_irq_debug = false;
 	rsrc_data->irq_debug_cnt = 0;
 
-	if (rsrc_data->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+	if (top_priv->top_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
 		val = cam_io_r_mb(rsrc_data->mem_base +
 			rsrc_data->common_reg->diag_config);
 		val |= rsrc_data->reg_data->enable_diagnostic_hw;
@@ -2109,6 +2165,17 @@ int cam_tfe_top_start(struct cam_tfe_hw_core_info *core_info,
 				rsrc_rdi_data->reg_data->subscribe_irq_mask,
 				CAM_TFE_TOP_IRQ_REG_NUM, true);
 
+		if (top_priv->top_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			val = cam_io_r_mb(rsrc_rdi_data->mem_base +
+				rsrc_rdi_data->common_reg->diag_config);
+			val |= ((rsrc_rdi_data->reg_data->enable_diagnostic_hw)|
+				(rsrc_rdi_data->reg_data->diag_sensor_sel <<
+				rsrc_rdi_data->reg_data->diag_sensor_shift));
+			cam_io_w_mb(val, rsrc_rdi_data->mem_base +
+				rsrc_rdi_data->common_reg->diag_config);
+		}
+
 		CAM_DBG(CAM_ISP, "TFE:%d Start RDI %d", core_info->core_index,
 			in_res->res_id - CAM_ISP_HW_TFE_IN_RDI0);
 	}
@@ -2762,6 +2829,7 @@ int cam_tfe_process_cmd(void *hw_priv, uint32_t cmd_type,
 	struct cam_hw_soc_info            *soc_info = NULL;
 	struct cam_tfe_hw_core_info       *core_info = NULL;
 	struct cam_tfe_hw_info            *hw_info = NULL;
+
 	int rc = 0;
 
 	if (!hw_priv) {
@@ -2809,6 +2877,10 @@ int cam_tfe_process_cmd(void *hw_priv, uint32_t cmd_type,
 		rc = cam_tfe_hw_dump(core_info,
 			cmd_args, arg_size);
 		break;
+	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+		rc = cam_tfe_set_top_debug(core_info, cmd_args,
+			arg_size);
+		break;
 	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
 	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
 	case CAM_ISP_HW_CMD_STRIPE_UPDATE:

+ 7 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.h

@@ -97,6 +97,11 @@ struct cam_tfe_top_reg_offset_common {
 	uint32_t perf_stall_count;
 	uint32_t perf_always_count;
 	uint32_t perf_count_status;
+
+	/*reg data */
+	uint32_t diag_min_hbi_error_shift;
+	uint32_t diag_neq_hbi_shift;
+	uint32_t diag_sensor_hbi_mask;
 };
 
 struct cam_tfe_camif_reg {
@@ -190,6 +195,8 @@ struct cam_tfe_rdi_reg_data {
 	uint32_t     error_irq_mask2;
 	uint32_t     subscribe_irq_mask[CAM_TFE_TOP_IRQ_REG_NUM];
 	uint32_t     enable_diagnostic_hw;
+	uint32_t     diag_sensor_sel;
+	uint32_t     diag_sensor_shift;
 };
 
 struct cam_tfe_clc_hw_status {

+ 39 - 19
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c

@@ -630,6 +630,8 @@ static enum cam_vfe_bus_ver3_vfe_out_type
 	case CAM_ISP_IFE_LITE_OUT_RES_GAMMA:
 		return CAM_VFE_BUS_VER3_VFE_OUT_GAMMA;
 	default:
+		CAM_WARN(CAM_ISP, "Invalid isp res id: %d , assigning max",
+			res_type);
 		return CAM_VFE_BUS_VER3_VFE_OUT_MAX;
 	}
 }
@@ -853,17 +855,23 @@ static void cam_vfe_bus_ver3_print_constraint_errors(
 static void cam_vfe_bus_ver3_get_constraint_errors(
 	struct cam_vfe_bus_ver3_priv *bus_priv)
 {
-	uint32_t i, constraint_errors;
-	struct cam_vfe_bus_ver3_wm_resource_data *wm_data;
+	uint32_t i, j, constraint_errors;
+	struct cam_isp_resource_node              *out_rsrc_node = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data      *out_rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data  *wm_data   = NULL;
 
-	for (i = 0; i < bus_priv->num_client; i++) {
-		wm_data = bus_priv->bus_client[i].res_priv;
-		if (wm_data) {
-			constraint_errors = cam_io_r_mb(
-				bus_priv->common_data.mem_base +
-				wm_data->hw_regs->debug_status_1);
-			cam_vfe_bus_ver3_print_constraint_errors(i,
-				constraint_errors);
+	for (i = 0; i < bus_priv->num_out; i++) {
+		out_rsrc_node = &bus_priv->vfe_out[i];
+		out_rsrc_data = out_rsrc_node->res_priv;
+		for (j = 0; j < out_rsrc_data->num_wm; j++) {
+			wm_data = out_rsrc_data->wm_res[j].res_priv;
+			if (wm_data) {
+				constraint_errors = cam_io_r_mb(
+					bus_priv->common_data.mem_base +
+					wm_data->hw_regs->debug_status_1);
+				cam_vfe_bus_ver3_print_constraint_errors(j,
+					constraint_errors);
+			}
 		}
 	}
 }
@@ -2505,19 +2513,31 @@ static int cam_vfe_bus_ver3_print_dimensions(
 	struct cam_vfe_bus_ver3_vfe_out_data      *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_wm_resource_data  *wm_data   = NULL;
 	struct cam_vfe_bus_ver3_common_data  *common_data = NULL;
-	int                                        i, wm_idx;
+	int                                        i;
 	uint32_t addr_status0, addr_status1, addr_status2, addr_status3;
 
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Invalid bus private data, res_id: %d",
+			vfe_out_res_id);
+		return -EINVAL;
+	}
+
+	if (vfe_out_res_id >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid out resource for dump: %d",
+			vfe_out_res_id);
+		return -EINVAL;
+	}
+
 	rsrc_node = &bus_priv->vfe_out[vfe_out_res_id];
 	rsrc_data = rsrc_node->res_priv;
+	if (!rsrc_data) {
+		CAM_ERR(CAM_ISP, "VFE out data is null, res_id: %d",
+			vfe_out_res_id);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < rsrc_data->num_wm; i++) {
-		wm_idx = 0;
-		if (wm_idx < 0 || wm_idx >= bus_priv->num_client) {
-			CAM_ERR(CAM_ISP, "Unsupported VFE out %d",
-				vfe_out_res_id);
-			return -EINVAL;
-		}
-		wm_data = bus_priv->bus_client[wm_idx].res_priv;
+		wm_data = rsrc_data->wm_res[i].res_priv;
 		common_data = rsrc_data->common_data;
 		addr_status0 = cam_io_r_mb(common_data->mem_base +
 			wm_data->hw_regs->addr_status_0);
@@ -2530,7 +2550,7 @@ static int cam_vfe_bus_ver3_print_dimensions(
 
 		CAM_INFO(CAM_ISP,
 			"VFE:%d WM:%d width:%u height:%u stride:%u x_init:%u en_cfg:%u acquired width:%u height:%u",
-			wm_data->common_data->core_index, wm_idx,
+			wm_data->common_data->core_index, wm_data->index,
 			wm_data->width,
 			wm_data->height,
 			wm_data->stride, wm_data->h_init,

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c

@@ -1468,6 +1468,8 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
 		camif_priv->eof_ts.tv_usec =
 			payload->ts.mono_time.tv_usec;
 
+		cam_cpas_notify_event("IFE EOF", evt_info.hw_idx);
+
 		if (camif_priv->event_cb)
 			camif_priv->event_cb(camif_priv->priv,
 				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);

+ 39 - 31
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -54,7 +54,8 @@ void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link)
 	link->prev_sof_timestamp = 0;
 	link->skip_init_frame = false;
 	link->num_sync_links = 0;
-	link->last_applied_jiffies = 0;
+	link->last_sof_trigger_jiffies = 0;
+	link->wq_congestion = false;
 	atomic_set(&link->eof_event_cnt, 0);
 
 	for (pd = 0; pd < CAM_PIPELINE_DELAY_MAX; pd++) {
@@ -214,11 +215,16 @@ static void __cam_req_mgr_find_dev_name(
 		if (dev->dev_info.p_delay == pd) {
 			if (masked_val & (1 << dev->dev_bit))
 				continue;
-
-			CAM_INFO(CAM_CRM,
-				"Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %d",
-				req_id, link->link_hdl, pd, dev->dev_info.name,
-				link->open_req_cnt);
+			if (link->wq_congestion)
+				CAM_INFO_RATE_LIMIT(CAM_CRM,
+					"WQ congestion, Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %d",
+					req_id, link->link_hdl, pd,
+					dev->dev_info.name, link->open_req_cnt);
+			else
+				CAM_INFO(CAM_CRM,
+					"Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %d",
+					req_id, link->link_hdl, pd,
+					dev->dev_info.name, link->open_req_cnt);
 		}
 	}
 }
@@ -1622,12 +1628,11 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 {
 	int                                  rc = 0, idx, i;
 	int                                  reset_step = 0;
-	bool                                 check_retry_cnt = false;
 	uint32_t                             trigger = trigger_data->trigger;
 	struct cam_req_mgr_slot             *slot = NULL;
 	struct cam_req_mgr_req_queue        *in_q;
 	struct cam_req_mgr_core_session     *session;
-	struct cam_req_mgr_connected_device *dev;
+	struct cam_req_mgr_connected_device *dev = NULL;
 	struct cam_req_mgr_core_link        *tmp_link = NULL;
 	uint32_t                             max_retry = 0;
 
@@ -1656,18 +1661,18 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
 		CAM_DBG(CAM_CRM, "No Pending req");
 		rc = 0;
-		goto error;
+		goto end;
 	}
 
 	if ((trigger != CAM_TRIGGER_POINT_SOF) &&
 		(trigger != CAM_TRIGGER_POINT_EOF))
-		goto error;
+		goto end;
 
 	if ((trigger == CAM_TRIGGER_POINT_EOF) &&
 		(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
 		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
 		rc = -EINVAL;
-		goto error;
+		goto end;
 	}
 
 	if (trigger == CAM_TRIGGER_POINT_SOF) {
@@ -1678,11 +1683,19 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 		link->prev_sof_timestamp = link->sof_timestamp;
 		link->sof_timestamp = trigger_data->sof_timestamp_val;
 
+		/* Check for WQ congestion */
+		if (jiffies_to_msecs(jiffies -
+			link->last_sof_trigger_jiffies) <
+			MINIMUM_WORKQUEUE_SCHED_TIME_IN_MS)
+			link->wq_congestion = true;
+		else
+			link->wq_congestion = false;
+
 		if (link->trigger_mask) {
 			CAM_ERR_RATE_LIMIT(CAM_CRM,
 				"Applying for last EOF fails");
 			rc = -EINVAL;
-			goto error;
+			goto end;
 		}
 
 		if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) {
@@ -1733,7 +1746,8 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 			}
 			spin_unlock_bh(&link->link_state_spin_lock);
 			__cam_req_mgr_notify_frame_skip(link, trigger);
-			goto error;
+			__cam_req_mgr_validate_crm_wd_timer(link);
+			goto end;
 		}
 	}
 
@@ -1745,11 +1759,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 		if (link->max_delay == 1)
 			max_retry++;
 
-		if (jiffies_to_msecs(jiffies - link->last_applied_jiffies) >
-			MINIMUM_WORKQUEUE_SCHED_TIME_IN_MS)
-			check_retry_cnt = true;
-
-		if ((in_q->last_applied_idx < in_q->rd_idx) && check_retry_cnt) {
+		if (!link->wq_congestion && dev) {
 			link->retry_cnt++;
 			if (link->retry_cnt == max_retry) {
 				CAM_DBG(CAM_CRM,
@@ -1769,7 +1779,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 				link->retry_cnt = 0;
 			}
 		} else
-			CAM_WARN(CAM_CRM,
+			CAM_WARN_RATE_LIMIT(CAM_CRM,
 				"workqueue congestion, last applied idx:%d rd idx:%d",
 				in_q->last_applied_idx,
 				in_q->rd_idx);
@@ -1831,20 +1841,14 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 			link->open_req_cnt--;
 		}
 	}
-
+end:
 	/*
-	 * Only update the jiffies of last applied request
-	 * for SOF trigger, since it is used to protect from
+	 * Only update the jiffies for SOF trigger,
+	 * since it is used to protect from
 	 * applying fails in ISP which is triggered at SOF.
-	 * And, also don't need to do update for error case
-	 * since error case doesn't check the retry count.
 	 */
 	if (trigger == CAM_TRIGGER_POINT_SOF)
-		link->last_applied_jiffies = jiffies;
-
-	mutex_unlock(&session->lock);
-	return rc;
-error:
+		link->last_sof_trigger_jiffies = jiffies;
 	mutex_unlock(&session->lock);
 	return rc;
 }
@@ -3321,6 +3325,10 @@ static int cam_req_mgr_cb_notify_trigger(
 		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task frame %lld",
 			trigger_data->frame_id);
 		rc = -EBUSY;
+		spin_lock_bh(&link->link_state_spin_lock);
+		if ((link->watchdog) && !(link->watchdog->pause_timer))
+			link->watchdog->pause_timer = true;
+		spin_unlock_bh(&link->link_state_spin_lock);
 		goto end;
 	}
 	task_data = (struct crm_task_payload *)task->payload;
@@ -3618,12 +3626,12 @@ static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
 
 	mutex_lock(&link->lock);
 
-	/* Destroy workq of link */
-	cam_req_mgr_workq_destroy(&link->workq);
 	spin_lock_bh(&link->link_state_spin_lock);
 	/* Destroy timer of link */
 	crm_timer_exit(&link->watchdog);
 	spin_unlock_bh(&link->link_state_spin_lock);
+	/* Destroy workq of link */
+	cam_req_mgr_workq_destroy(&link->workq);
 
 	/* Cleanup request tables and unlink devices */
 	__cam_req_mgr_destroy_link_info(link);

+ 4 - 2
drivers/cam_req_mgr/cam_req_mgr_core.h

@@ -373,7 +373,8 @@ struct cam_req_mgr_connected_device {
  * @eof_event_cnt        : Atomic variable to track the number of EOF requests
  * @skip_init_frame      : skip initial frames crm_wd_timer validation in the
  *                         case of long exposure use case
- * @last_applied_jiffies : Record the jiffies of last applied req
+ * @last_sof_trigger_jiffies : Record the jiffies of last sof trigger jiffies
+ * @wq_congestion        : Indicates if WQ congestion is detected or not
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -410,7 +411,8 @@ struct cam_req_mgr_core_link {
 	uint32_t    trigger_cnt[CAM_REQ_MGR_MAX_TRIGGERS];
 	atomic_t                             eof_event_cnt;
 	bool                                 skip_init_frame;
-	uint64_t                             last_applied_jiffies;
+	uint64_t                             last_sof_trigger_jiffies;
+	bool                                 wq_congestion;
 };
 
 /**

+ 10 - 8
drivers/cam_req_mgr/cam_req_mgr_util.c

@@ -223,23 +223,25 @@ void *cam_get_device_priv(int32_t dev_hdl)
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2) {
-		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx:%d", idx);
 		goto device_priv_fail;
 	}
 
-	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
+	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl [%d] [%d]",
+			dev_hdl, hdl_tbl->hdl[idx].hdl_value);
 		goto device_priv_fail;
 	}
 
-	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
-	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
-		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
+	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state:%d",
+			hdl_tbl->hdl[idx].state);
 		goto device_priv_fail;
 	}
 
-	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl");
+	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type:%d", type);
 		goto device_priv_fail;
 	}
 

+ 1 - 1
drivers/cam_req_mgr/cam_req_mgr_workq.c

@@ -281,7 +281,7 @@ void cam_req_mgr_thread_switch_delay_detect(ktime_t workq_scheduled)
 	cur_ts = ktime_to_timespec64(cur_time);
 
 	if (diff > CAM_WORKQ_RESPONSE_TIME_THRESHOLD) {
-		CAM_WARN(CAM_CRM,
+		CAM_WARN_RATE_LIMIT(CAM_CRM,
 			"Workq delay detected %ld:%06ld %ld:%06ld %ld:",
 			workq_scheduled_ts.tv_sec,
 			workq_scheduled_ts.tv_nsec/NSEC_PER_USEC,

+ 1 - 0
drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c

@@ -801,6 +801,7 @@ void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
 	power_info->power_down_setting = NULL;
 	power_info->power_setting_size = 0;
 	power_info->power_down_setting_size = 0;
+	a_ctrl->last_flush_req = 0;
 
 	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 }

+ 1 - 31
drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_3_hwreg.h

@@ -15,7 +15,7 @@ struct csiphy_reg_parms_t csiphy_v1_2_3 = {
 	.csiphy_interrupt_status_size = 11,
 	.csiphy_common_array_size = 8,
 	.csiphy_reset_array_size = 5,
-	.csiphy_2ph_config_array_size = 19,
+	.csiphy_2ph_config_array_size = 16,
 	.csiphy_3ph_config_array_size = 26,
 	.csiphy_2ph_3ph_config_array_size = 0,
 	.csiphy_2ph_clock_lane = 0x1,
@@ -72,9 +72,6 @@ struct csiphy_reg_t csiphy_2ph_v1_2_3_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x005C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0060, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -93,9 +90,6 @@ struct csiphy_reg_t csiphy_2ph_v1_2_3_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x075C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0760, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0764, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -114,9 +108,6 @@ struct csiphy_reg_t csiphy_2ph_v1_2_3_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x025C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0260, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0264, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -135,9 +126,6 @@ struct csiphy_reg_t csiphy_2ph_v1_2_3_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x045C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0460, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -156,9 +144,6 @@ struct csiphy_reg_t csiphy_2ph_v1_2_3_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x065C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0660, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0664, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
@@ -181,9 +166,6 @@ struct csiphy_reg_t
 		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x005C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0060, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -202,9 +184,6 @@ struct csiphy_reg_t
 		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x075C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0760, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0764, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -223,9 +202,6 @@ struct csiphy_reg_t
 		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x025C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0260, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0264, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -244,9 +220,6 @@ struct csiphy_reg_t
 		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x045C, 0xC0, 0x00, CSIPHY_SKEW_CAL},
-		{0x0460, 0x0D, 0x00, CSIPHY_SKEW_CAL},
-		{0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -265,9 +238,6 @@ struct csiphy_reg_t
 		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
-		{0x065C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0660, 0x00, 0x00, CSIPHY_DNP_PARAMS},
-		{0x0664, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };

+ 1 - 19
drivers/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_5_hwreg.h

@@ -15,7 +15,7 @@ struct csiphy_reg_parms_t csiphy_v1_2_5 = {
 	.csiphy_interrupt_status_size = 11,
 	.csiphy_common_array_size = 6,
 	.csiphy_reset_array_size = 5,
-	.csiphy_2ph_config_array_size = 23,
+	.csiphy_2ph_config_array_size = 21,
 	.csiphy_3ph_config_array_size = 30,
 	.csiphy_2ph_clock_lane = 0x1,
 	.csiphy_2ph_combo_ck_ln = 0x10,
@@ -74,8 +74,6 @@ csiphy_reg_t csiphy_2ph_v1_2_5_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x005C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0060, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 	},
@@ -124,8 +122,6 @@ csiphy_reg_t csiphy_2ph_v1_2_5_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x025C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0260, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 	},
@@ -149,8 +145,6 @@ csiphy_reg_t csiphy_2ph_v1_2_5_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x045C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0460, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 	},
@@ -174,8 +168,6 @@ csiphy_reg_t csiphy_2ph_v1_2_5_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x065C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0660, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 	},
@@ -202,8 +194,6 @@ struct csiphy_reg_t
 		{0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x005C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0060, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
@@ -229,8 +219,6 @@ struct csiphy_reg_t
 		{0x070c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x075C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0760, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -252,8 +240,6 @@ struct csiphy_reg_t
 		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x025C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0260, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
@@ -278,8 +264,6 @@ struct csiphy_reg_t
 		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x045C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0460, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0000, 0x00, 0x00, CSIPHY_DNP_PARAMS},
 	},
@@ -304,8 +288,6 @@ struct csiphy_reg_t
 		{0x060c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x065C, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0660, 0x0D, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };

+ 1 - 1
drivers/cam_sensor_module/cam_flash/cam_flash_core.c

@@ -1748,7 +1748,7 @@ int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
 			add_req.skip_before_applying |= SKIP_NEXT_FRAME;
 			add_req.trigger_eof = true;
 
-			if ((flash_data->opcode !=
+			if (flash_data && (flash_data->opcode !=
 				CAMERA_SENSOR_FLASH_OP_FIREDURATION))
 				add_req.skip_before_applying |= 1;
 			else

+ 1 - 1
drivers/cam_smmu/cam_smmu_api.c

@@ -3552,7 +3552,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
 	/* create a virtual mapping */
 	if (cb->io_support) {
 		cb->domain = iommu_get_domain_for_dev(dev);
-		if (IS_ERR(cb->domain)) {
+		if (IS_ERR_OR_NULL(cb->domain)) {
 			CAM_ERR(CAM_SMMU, "Error: create domain Failed");
 			rc = -ENODEV;
 			goto end;