
msm: camera: common: Merge camera-kernel.3.1 changes into camera-kernel.4.0

msm: camera: cdm: Fix dangling pointer issue
msm: camera: cdm: Change work record to atomic variable
msm: camera: utils: Add device type to track device handles
msm: camera: tfe: Reduce stack footprint during bw vote
msm: camera: req_mgr: Thread switch delay detection mechanisms
msm: camera: cdm: Avoid submitting BL if FIFO is full
msm: camera: tfe: Check CDM hang in the TFE config timeout
msm: camera: req_mgr: Delay detection mechanism
msm: camera: cdm: Debug info in case of cdm page fault
msm: camera: isp: Max context reduction for TFE in isp driver
msm: camera: ope: Maintain current clock value during acquire
msm: camera: req_mgr: Limit CAM_ERR log in case of no empty task
msm: camera: cdm: Decrement write-count only after Bl commit
msm: camera: isp: Add CSID recovery mechanism

CRs-Fixed: 2792394
Change-Id: I1c7a903ae15b572acf3f6318cda7394cb6549c8d
Signed-off-by: Tejas Prajapati <[email protected]>
Tejas Prajapati, 4 years ago
parent
commit
4574450a12
46 changed files with 1130 additions and 252 deletions
  1. + 2 - 1     drivers/cam_cdm/cam_cdm.h
  2. + 1 - 4     drivers/cam_cdm/cam_cdm_core_common.c
  3. + 205 - 117 drivers/cam_cdm/cam_cdm_hw_core.c
  4. + 37 - 0    drivers/cam_cdm/cam_cdm_intf_api.h
  5. + 66 - 12   drivers/cam_cdm/cam_cdm_util.c
  6. + 9 - 2     drivers/cam_cdm/cam_cdm_virtual_core.c
  7. + 1 - 1     drivers/cam_core/cam_context_utils.c
  8. + 5 - 4     drivers/cam_core/cam_node.c
  9. + 19 - 9    drivers/cam_cpas/cam_cpas_hw.c
  10. + 0 - 1    drivers/cam_cpas/cam_cpas_soc.c
  11. + 5 - 0    drivers/cam_cpas/cpas_top/cam_cpastop_hw.c
  12. + 2 - 0    drivers/cam_cpas/cpas_top/cam_cpastop_hw.h
  13. + 16 - 1   drivers/cam_isp/cam_isp_context.c
  14. + 45 - 11  drivers/cam_isp/cam_isp_dev.c
  15. + 7 - 3    drivers/cam_isp/cam_isp_dev.h
  16. + 4 - 4    drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
  17. + 2 - 2    drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
  18. + 261 - 49 drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c
  19. + 11 - 2   drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h
  20. + 8 - 0    drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
  21. + 1 - 0    drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
  22. + 215 - 0  drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c
  23. + 26 - 0   drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h
  24. + 4 - 4    drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c
  25. + 19 - 3   drivers/cam_req_mgr/cam_req_mgr_core.c
  26. + 10 - 1   drivers/cam_req_mgr/cam_req_mgr_debug.c
  27. + 5 - 1    drivers/cam_req_mgr/cam_req_mgr_debug.h
  28. + 2 - 1    drivers/cam_req_mgr/cam_req_mgr_dev.c
  29. + 25 - 4   drivers/cam_req_mgr/cam_req_mgr_util.c
  30. + 5 - 1    drivers/cam_req_mgr/cam_req_mgr_util.h
  31. + 24 - 0   drivers/cam_req_mgr/cam_req_mgr_workq.c
  32. + 24 - 8   drivers/cam_req_mgr/cam_req_mgr_workq.h
  33. + 1 - 0    drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c
  34. + 4 - 0    drivers/cam_sensor_module/cam_cci/cam_cci_core.c
  35. + 2 - 0    drivers/cam_sensor_module/cam_cci/cam_cci_dev.h
  36. + 1 - 0    drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
  37. + 1 - 0    drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
  38. + 1 - 0    drivers/cam_sensor_module/cam_flash/cam_flash_dev.c
  39. + 1 - 0    drivers/cam_sensor_module/cam_ois/cam_ois_core.c
  40. + 1 - 0    drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c
  41. + 2 - 0    drivers/cam_sync/cam_sync.c
  42. + 8 - 6    drivers/cam_sync/cam_sync_private.h
  43. + 3 - 0    drivers/cam_sync/cam_sync_util.c
  44. + 3 - 0    drivers/cam_utils/cam_packet_util.c
  45. + 35 - 0   drivers/cam_utils/cam_trace.h
  46. + 1 - 0    include/uapi/camera/media/cam_req_mgr.h

+ 2 - 1
drivers/cam_cdm/cam_cdm.h

@@ -440,6 +440,7 @@ struct cam_cdm_work_payload {
 	uint32_t irq_status;
 	uint32_t irq_data;
 	int fifo_idx;
+	ktime_t workq_scheduled_ts;
 	struct work_struct work;
 };
 
@@ -476,7 +477,7 @@ struct cam_cdm_bl_fifo {
 	uint8_t bl_tag;
 	uint32_t bl_depth;
 	uint8_t last_bl_tag_done;
-	uint32_t work_record;
+	atomic_t work_record;
 };
 
 /**
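The work_record field above becomes an atomic_t so the IRQ handler and hang detection can update and read it without holding fifo_lock. A minimal sketch of the counting pattern (hypothetical names, not the driver's code):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t work_record = ATOMIC_INIT(0);

/* IRQ context: one more work item queued */
static void on_work_queued(void)
{
	atomic_inc(&work_record);
}

/* Workqueue context: one work item drained */
static void on_work_done(void)
{
	if (atomic_read(&work_record))
		atomic_dec(&work_record);
}

/* Hang detection: lock-free read of the pending count */
static bool workq_delayed(void)
{
	return atomic_read(&work_record) != 0;
}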

+ 1 - 4
drivers/cam_cdm/cam_cdm_core_common.c

@@ -206,6 +206,7 @@ void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
 	} else if (status == CAM_CDM_CB_STATUS_HW_RESET_DONE ||
 			status == CAM_CDM_CB_STATUS_HW_FLUSH ||
 			status == CAM_CDM_CB_STATUS_HW_RESUBMIT ||
+			status == CAM_CDM_CB_STATUS_INVALID_BL_CMD ||
 			status == CAM_CDM_CB_STATUS_HW_ERROR) {
 		int client_idx;
 		struct cam_cdm_bl_cb_request_entry *node =
@@ -800,13 +801,11 @@ int cam_cdm_process_cmd(void *hw_priv,
 		}
 
 		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
-		mutex_lock(&cdm_hw->hw_mutex);
 		client = core->clients[idx];
 		if (!client) {
 			CAM_ERR(CAM_CDM,
 				"Client not present for handle %d",
 				*handle);
-			mutex_unlock(&cdm_hw->hw_mutex);
 			break;
 		}
 
@@ -814,12 +813,10 @@ int cam_cdm_process_cmd(void *hw_priv,
 			CAM_ERR(CAM_CDM,
 				"handle mismatch, client handle %d index %d received handle %d",
 				client->handle, idx, *handle);
-			mutex_unlock(&cdm_hw->hw_mutex);
 			break;
 		}
 
 		rc = cam_hw_cdm_hang_detect(cdm_hw, *handle);
-		mutex_unlock(&cdm_hw->hw_mutex);
 		break;
 	}
 	case CAM_CDM_HW_INTF_DUMP_DBG_REGS:

+ 205 - 117
drivers/cam_cdm/cam_cdm_hw_core.c

@@ -25,6 +25,7 @@
 #include "cam_cdm_hw_reg_2_1.h"
 #include "camera_main.h"
 #include "cam_trace.h"
+#include "cam_req_mgr_workq.h"
 
 #define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
 #define CAM_CDM_DBG_GEN_IRQ_USR_DATA 0xff
@@ -569,7 +570,7 @@ int cam_hw_cdm_wait_for_bl_fifo(
 			CAM_DBG(CAM_CDM,
 				"BL slot available_cnt=%d requested=%d",
 				(available_bl_slots - 1), bl_count);
-				rc = bl_count;
+				rc = available_bl_slots - 1;
 				break;
 		} else if (0 == (available_bl_slots - 1)) {
 			rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw,
@@ -595,7 +596,7 @@ int cam_hw_cdm_wait_for_bl_fifo(
 			if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
 					false, fifo_idx))
 				CAM_ERR(CAM_CDM, "Disable BL done irq failed");
-			rc = 0;
+			rc = 1;
 			CAM_DBG(CAM_CDM, "CDM HW is ready for data");
 		} else {
 			rc = (bl_count - (available_bl_slots - 1));
@@ -702,6 +703,7 @@ int cam_hw_cdm_submit_gen_irq(
 			core->bl_fifo[fifo_idx].bl_tag);
 		list_del_init(&node->entry);
 		kfree(node);
+		node = NULL;
 		rc = -EIO;
 		goto end;
 	}
@@ -712,6 +714,7 @@ int cam_hw_cdm_submit_gen_irq(
 			core->bl_fifo[fifo_idx].bl_tag);
 		list_del_init(&node->entry);
 		kfree(node);
+		node = NULL;
 		rc = -EIO;
 	}
 
@@ -922,8 +925,6 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 				rc = -EIO;
 				break;
 			}
-		} else {
-			write_count--;
 		}
 
 		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
@@ -1026,27 +1027,58 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 					rc = -EIO;
 					break;
 				}
+				write_count--;
 				CAM_DBG(CAM_CDM, "commit success BL %d tag=%d",
 					i, core->bl_fifo[fifo_idx].bl_tag);
 			}
 			core->bl_fifo[fifo_idx].bl_tag++;
 
 			if (cdm_cmd->cmd[i].enable_debug_gen_irq) {
+				if (write_count == 0) {
+					write_count =
+						cam_hw_cdm_wait_for_bl_fifo(
+						cdm_hw, 1, fifo_idx);
+					if (write_count < 0) {
+						CAM_ERR(CAM_CDM,
+						"wait for bl fifo failed %d:%d",
+						i, req->data->cmd_arrary_count);
+						rc = -EIO;
+						break;
+					}
+				}
+
 				rc = cam_hw_cdm_submit_debug_gen_irq(cdm_hw,
 					fifo_idx);
-				if (rc == 0)
+				if (rc == 0) {
+					write_count--;
 					core->bl_fifo[fifo_idx].bl_tag++;
+				}
 				if (core->bl_fifo[fifo_idx].bl_tag >=
 						(bl_fifo->bl_depth -
 						1))
 					core->bl_fifo[fifo_idx].bl_tag = 0;
 			}
 
-			if ((req->data->flag == true) &&
+			if ((!rc) && (req->data->flag == true) &&
 				(i == (req->data->cmd_arrary_count -
 				1))) {
+
+				if (write_count == 0) {
+					write_count =
+						cam_hw_cdm_wait_for_bl_fifo(
+						cdm_hw, 1, fifo_idx);
+					if (write_count < 0) {
+						CAM_ERR(CAM_CDM,
+						"wait for bl fifo failed %d:%d",
+						i, req->data->cmd_arrary_count);
+						rc = -EIO;
+						break;
+					}
+				}
+
 				if (core->arbitration !=
 					CAM_CDM_ARBITRATION_PRIORITY_BASED) {
+
 					rc = cam_hw_cdm_submit_gen_irq(
 						cdm_hw, req, fifo_idx,
 						cdm_cmd->gen_irq_arb);
@@ -1080,7 +1112,8 @@ static void cam_hw_cdm_reset_cleanup(
 	struct cam_cdm_bl_cb_request_entry *node, *tnode;
 	bool flush_hw = false;
 
-	if (test_bit(CAM_CDM_FLUSH_HW_STATUS, &core->cdm_status))
+	if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
+		test_bit(CAM_CDM_FLUSH_HW_STATUS, &core->cdm_status))
 		flush_hw = true;
 
 	for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++) {
@@ -1104,10 +1137,11 @@ static void cam_hw_cdm_reset_cleanup(
 			}
 			list_del_init(&node->entry);
 			kfree(node);
+			node = NULL;
 		}
 		core->bl_fifo[i].bl_tag = 0;
 		core->bl_fifo[i].last_bl_tag_done = -1;
-		core->bl_fifo[i].work_record = 0;
+		atomic_set(&core->bl_fifo[i].work_record, 0);
 	}
 }
 
@@ -1116,121 +1150,165 @@ static void cam_hw_cdm_work(struct work_struct *work)
 	struct cam_cdm_work_payload *payload;
 	struct cam_hw_info *cdm_hw;
 	struct cam_cdm *core;
-	int i;
+	int i, fifo_idx;
+	struct cam_cdm_bl_cb_request_entry *tnode = NULL;
+	struct cam_cdm_bl_cb_request_entry *node = NULL;
 
 	payload = container_of(work, struct cam_cdm_work_payload, work);
-	if (payload) {
-		cdm_hw = payload->hw;
-		core = (struct cam_cdm *)cdm_hw->core_info;
-		if (payload->fifo_idx >= core->offsets->reg_data->num_bl_fifo) {
-			CAM_ERR(CAM_CDM, "Invalid fifo idx %d",
-				payload->fifo_idx);
+	if (!payload) {
+		CAM_ERR(CAM_CDM, "NULL payload");
+		return;
+	}
+
+	cdm_hw = payload->hw;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	fifo_idx = payload->fifo_idx;
+	if (fifo_idx >= core->offsets->reg_data->num_bl_fifo) {
+		CAM_ERR(CAM_CDM, "Invalid fifo idx %d",
+			fifo_idx);
+		kfree(payload);
+		payload = NULL;
+		return;
+	}
+
+	cam_req_mgr_thread_switch_delay_detect(
+		payload->workq_scheduled_ts);
+
+	CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status);
+	if (payload->irq_status &
+		CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) {
+		CAM_DBG(CAM_CDM, "inline IRQ data=0x%x last tag: 0x%x",
+			payload->irq_data,
+			core->bl_fifo[payload->fifo_idx]
+				.last_bl_tag_done);
+
+		if (payload->irq_data == 0xff) {
+			CAM_INFO(CAM_CDM, "Debug genirq received");
 			kfree(payload);
+			payload = NULL;
 			return;
 		}
 
-		CAM_DBG(CAM_CDM, "IRQ status=0x%x", payload->irq_status);
-		if (payload->irq_status &
-			CAM_CDM_IRQ_STATUS_INLINE_IRQ_MASK) {
-			struct cam_cdm_bl_cb_request_entry *node, *tnode;
-
-			CAM_DBG(CAM_CDM, "inline IRQ data=0x%x last tag: 0x%x",
-				payload->irq_data,
-				core->bl_fifo[payload->fifo_idx]
-					.last_bl_tag_done);
-
-			if (payload->irq_data == 0xff) {
-				CAM_INFO(CAM_CDM, "Debug genirq received");
-				kfree(payload);
-				return;
-			}
+		mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock);
 
-			mutex_lock(&core->bl_fifo[payload->fifo_idx]
-				.fifo_lock);
-
-			if (core->bl_fifo[payload->fifo_idx].work_record)
-				core->bl_fifo[payload->fifo_idx].work_record--;
-
-			if (list_empty(&core->bl_fifo[payload->fifo_idx]
-					.bl_request_list)) {
-				CAM_INFO(CAM_CDM,
-					"Fifo list empty, idx %d tag %d arb %d",
-					payload->fifo_idx, payload->irq_data,
-					core->arbitration);
-				mutex_unlock(&core->bl_fifo[payload->fifo_idx]
-						.fifo_lock);
-				return;
-			}
+		if (atomic_read(&core->bl_fifo[fifo_idx].work_record))
+			atomic_dec(&core->bl_fifo[fifo_idx].work_record);
 
-			if (core->bl_fifo[payload->fifo_idx]
-				.last_bl_tag_done !=
-				payload->irq_data) {
-				core->bl_fifo[payload->fifo_idx]
-					.last_bl_tag_done =
-					payload->irq_data;
-				list_for_each_entry_safe(node, tnode,
-					&core->bl_fifo[payload->fifo_idx]
-						.bl_request_list,
-					entry) {
-					if (node->request_type ==
-						CAM_HW_CDM_BL_CB_CLIENT) {
-						cam_cdm_notify_clients(cdm_hw,
-						CAM_CDM_CB_STATUS_BL_SUCCESS,
-						(void *)node);
-					} else if (node->request_type ==
-						CAM_HW_CDM_BL_CB_INTERNAL) {
-						CAM_ERR(CAM_CDM,
-							"Invalid node=%pK %d",
-							node,
-							node->request_type);
-					}
-					list_del_init(&node->entry);
-					if (node->bl_tag == payload->irq_data) {
-						kfree(node);
-						break;
-					}
+		if (list_empty(&core->bl_fifo[fifo_idx]
+				.bl_request_list)) {
+			CAM_INFO(CAM_CDM,
+				"Fifo list empty, idx %d tag %d arb %d",
+				fifo_idx, payload->irq_data,
+				core->arbitration);
+			mutex_unlock(&core->bl_fifo[fifo_idx]
+					.fifo_lock);
+			return;
+		}
+
+		if (core->bl_fifo[fifo_idx].last_bl_tag_done !=
+			payload->irq_data) {
+			core->bl_fifo[fifo_idx].last_bl_tag_done =
+				payload->irq_data;
+			list_for_each_entry_safe(node, tnode,
+				&core->bl_fifo[fifo_idx].bl_request_list,
+				entry) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+					CAM_CDM_CB_STATUS_BL_SUCCESS,
+					(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d",
+						node,
+						node->request_type);
 				}
-			} else {
-				CAM_INFO(CAM_CDM,
-					"Skip GenIRQ, tag 0x%x fifo %d",
-					payload->irq_data, payload->fifo_idx);
+				list_del_init(&node->entry);
+				if (node->bl_tag == payload->irq_data) {
+					kfree(node);
+					node = NULL;
+					break;
+				}
+				kfree(node);
+				node = NULL;
 			}
-			mutex_unlock(&core->bl_fifo[payload->fifo_idx]
-				.fifo_lock);
+		} else {
+			CAM_INFO(CAM_CDM,
+				"Skip GenIRQ, tag 0x%x fifo %d",
+				payload->irq_data, payload->fifo_idx);
+		}
+		mutex_unlock(&core->bl_fifo[payload->fifo_idx]
+			.fifo_lock);
+	}
+
+	if (payload->irq_status &
+		CAM_CDM_IRQ_STATUS_BL_DONE_MASK) {
+		if (test_bit(payload->fifo_idx, &core->cdm_status)) {
+			CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+			complete(&core->bl_fifo[payload->fifo_idx]
+				.bl_complete);
 		}
+	}
+	if (payload->irq_status &
+		CAM_CDM_IRQ_STATUS_ERRORS) {
+		int reset_hw_hdl = 0x0;
+
+		CAM_ERR_RATE_LIMIT(CAM_CDM,
+			"CDM Error IRQ status %d\n",
+			payload->irq_status);
+		set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status);
+		mutex_lock(&cdm_hw->hw_mutex);
+		for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
+				i++)
+			mutex_lock(&core->bl_fifo[i].fifo_lock);
+		/*
+		 * First pause CDM, If it fails still proceed
+		 * to dump debug info
+		 */
+		cam_hw_cdm_pause_core(cdm_hw, true);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw, true);
 
 		if (payload->irq_status &
-			CAM_CDM_IRQ_STATUS_BL_DONE_MASK) {
-			if (test_bit(payload->fifo_idx, &core->cdm_status)) {
-				CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
-				complete(&core->bl_fifo[payload->fifo_idx]
-					.bl_complete);
+		CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			node = list_first_entry_or_null(
+			&core->bl_fifo[payload->fifo_idx].bl_request_list,
+			struct cam_cdm_bl_cb_request_entry, entry);
+
+			if (node != NULL) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+					CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
 			}
 		}
+		/* Resume CDM back */
+		cam_hw_cdm_pause_core(cdm_hw, false);
+		for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
+				i++)
+			mutex_unlock(&core->bl_fifo[i].fifo_lock);
+
 		if (payload->irq_status &
-				CAM_CDM_IRQ_STATUS_ERRORS) {
-			CAM_ERR_RATE_LIMIT(CAM_CDM,
-				"CDM Error IRQ status %d\n",
-				payload->irq_status);
-			set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status);
-			mutex_lock(&cdm_hw->hw_mutex);
-			for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
-					i++)
-				mutex_lock(&core->bl_fifo[i].fifo_lock);
-			cam_hw_cdm_dump_core_debug_registers(cdm_hw, true);
-			for (i = 0; i < core->offsets->reg_data->num_bl_fifo;
-					i++)
-				mutex_unlock(&core->bl_fifo[i].fifo_lock);
-			mutex_unlock(&cdm_hw->hw_mutex);
-			if (!(payload->irq_status &
-					CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK))
-				clear_bit(CAM_CDM_ERROR_HW_STATUS,
-					&core->cdm_status);
-		}
-		kfree(payload);
-	} else {
-		CAM_ERR(CAM_CDM, "NULL payload");
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK)
+			cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl);
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		if (!(payload->irq_status &
+				CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK))
+			clear_bit(CAM_CDM_ERROR_HW_STATUS,
+				&core->cdm_status);
 	}
+	kfree(payload);
+	payload = NULL;
 
 }
 
@@ -1369,7 +1447,9 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
 			return IRQ_HANDLED;
 		}
 
-		cdm_core->bl_fifo[i].work_record++;
+		atomic_inc(&cdm_core->bl_fifo[i].work_record);
+		payload[i]->workq_scheduled_ts = ktime_get();
+
 		work_status = queue_work(
 			cdm_core->bl_fifo[i].work_queue,
 			&payload[i]->work);
@@ -1379,6 +1459,7 @@ irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
 				"Failed to queue work for FIFO: %d irq=0x%x",
 				i, payload[i]->irq_status);
 			kfree(payload[i]);
+			payload[i] = NULL;
 		}
 	}
 	if (rst_done_cnt == cdm_core->offsets->reg_data->num_bl_fifo_irq) {
@@ -1629,6 +1710,7 @@ int cam_hw_cdm_handle_error_info(
 		}
 		list_del_init(&node->entry);
 		kfree(node);
+		node = NULL;
 	}
 
 	cam_hw_cdm_reset_cleanup(cdm_hw, reset_hw_hdl);
@@ -1679,20 +1761,14 @@ int cam_hw_cdm_hang_detect(
 	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
 
 	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
-		mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
-
-	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
-		if (cdm_core->bl_fifo[i].work_record) {
+		if (atomic_read(&cdm_core->bl_fifo[i].work_record)) {
 			CAM_WARN(CAM_CDM,
-				"workqueue got delayed, bl_fifo: %d, work_record :%u",
-				i, cdm_core->bl_fifo[i].work_record);
+				"workqueue got delayed, work_record :%u",
+				atomic_read(&cdm_core->bl_fifo[i].work_record));
 			rc = 0;
 			break;
 		}
 
-	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
-		mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
-
 	return rc;
 }
 
@@ -1797,7 +1873,7 @@ int cam_hw_cdm_init(void *hw_priv,
 	}
 	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
 		cdm_core->bl_fifo[i].last_bl_tag_done = -1;
-		cdm_core->bl_fifo[i].work_record = 0;
+		atomic_set(&cdm_core->bl_fifo[i].work_record, 0);
 	}
 
 	rc = cam_hw_cdm_reset_hw(cdm_hw, reset_hw_hdl);
@@ -1856,6 +1932,7 @@ int cam_hw_cdm_deinit(void *hw_priv,
 			&cdm_core->bl_fifo[i].bl_request_list, entry) {
 			list_del_init(&node->entry);
 			kfree(node);
+			node = NULL;
 		}
 	}
 
@@ -1931,13 +2008,16 @@ static int cam_hw_cdm_component_bind(struct device *dev,
 	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
 	if (!cdm_hw) {
 		kfree(cdm_hw_intf);
+		cdm_hw_intf = NULL;
 		return -ENOMEM;
 	}
 
 	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
 	if (!cdm_hw->core_info) {
 		kfree(cdm_hw);
+		cdm_hw = NULL;
 		kfree(cdm_hw_intf);
+		cdm_hw_intf = NULL;
 		return -ENOMEM;
 	}
 
@@ -2174,11 +2254,15 @@ unlock_release_mem:
 	mutex_unlock(&cdm_hw->hw_mutex);
 release_private_mem:
 	kfree(cdm_hw->soc_info.soc_private);
+	cdm_hw->soc_info.soc_private = NULL;
 release_mem:
 	mutex_destroy(&cdm_hw->hw_mutex);
 	kfree(cdm_hw_intf);
+	cdm_hw_intf = NULL;
 	kfree(cdm_hw->core_info);
+	cdm_hw->core_info = NULL;
 	kfree(cdm_hw);
+	cdm_hw = NULL;
 	return rc;
 }
 
@@ -2256,9 +2340,13 @@ static void cam_hw_cdm_component_unbind(struct device *dev,
 
 	mutex_destroy(&cdm_hw->hw_mutex);
 	kfree(cdm_hw->soc_info.soc_private);
+	cdm_hw->soc_info.soc_private = NULL;
 	kfree(cdm_hw_intf);
+	cdm_hw_intf = NULL;
 	kfree(cdm_hw->core_info);
+	cdm_hw->core_info = NULL;
 	kfree(cdm_hw);
+	cdm_hw = NULL;
 }
 
 const static struct component_ops cam_hw_cdm_component_ops = {
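The workq_scheduled_ts stamp taken in the IRQ handler feeds the new thread-switch delay detection: when the work item finally runs, the handler compares the stamp against the current time. A minimal sketch of that check, assuming a hypothetical 100 ms threshold (the driver's actual threshold lives behind cam_req_mgr_thread_switch_delay_detect()):

#include <linux/ktime.h>
#include <linux/printk.h>

#define SWITCH_DELAY_THRESHOLD_MS 100 /* assumed value, not the driver's */

static void check_thread_switch_delay(ktime_t workq_scheduled_ts)
{
	s64 delay_ms = ktime_ms_delta(ktime_get(), workq_scheduled_ts);

	if (delay_ms > SWITCH_DELAY_THRESHOLD_MS)
		pr_warn("workq to thread switch took %lld ms\n", delay_ms);
}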

+ 37 - 0
drivers/cam_cdm/cam_cdm_intf_api.h

@@ -10,6 +10,8 @@
 #include "cam_cdm_util.h"
 #include "cam_soc_util.h"
 
+#define CAM_CDM_BL_CMD_MAX  25
+
 /* enum cam_cdm_id - Enum for possible CAM CDM hardwares */
 enum cam_cdm_id {
 	CAM_CDM_VIRTUAL,
@@ -151,6 +153,41 @@ struct cam_cdm_bl_request {
 	struct cam_cdm_bl_cmd cmd[1];
 };
 
+/**
+ * struct cam_cdm_bl_data - last submitted CDM BL data
+ *
+ * @mem_handle : Input mem handle of bl cmd
+ * @hw_addr    : Hw address of submitted Bl command
+ * @offset     : Input offset of the actual bl cmd in the memory pointed
+ *               by mem_handle
+ * @len        : length of submitted Bl command to CDM.
+ * @input_len  : Input length of the BL command; cannot be more than 1 MB
+ *               and is validated against offset+size of the memory pointed
+ *               to by mem_handle
+ * @type       : CDM BL cmd addr type.
+ */
+struct cam_cdm_bl_data {
+	int32_t mem_handle;
+	dma_addr_t hw_addr;
+	uint32_t offset;
+	size_t len;
+	uint32_t  input_len;
+	enum cam_cdm_bl_cmd_addr_type type;
+};
+
+/**
+ * struct cam_cdm_bl_info
+ *
+ * @bl_count   : No. of BL commands submitted to CDM.
+ * @cmd        : payload holding the array of BL commands
+ *               that were submitted.
+ *
+ */
+struct cam_cdm_bl_info {
+	int32_t bl_count;
+	struct cam_cdm_bl_data cmd[CAM_CDM_BL_CMD_MAX];
+};
+
 /**
  * @brief : API to get the CDM capabilities for a camera device type
  *
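The two structures above let a client snapshot its most recent BL submission so it can be dumped after a CDM page fault or invalid-BL error. A sketch of how a caller might fill cam_cdm_bl_info from a cam_cdm_bl_request (illustrative helper, not driver code; the TFE manager additionally resolves hw_addr via cam_mem_get_io_buf()):

static void record_last_bl(struct cam_cdm_bl_info *info,
	struct cam_cdm_bl_request *req)
{
	int i, count = req->cmd_arrary_count;

	if (count > CAM_CDM_BL_CMD_MAX)
		count = CAM_CDM_BL_CMD_MAX;

	info->bl_count = count;
	for (i = 0; i < count; i++) {
		info->cmd[i].mem_handle = req->cmd[i].bl_addr.mem_handle;
		info->cmd[i].offset     = req->cmd[i].offset;
		info->cmd[i].input_len  = req->cmd[i].len;
		info->cmd[i].type       = req->type;
	}
}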

+ 66 - 12
drivers/cam_cdm/cam_cdm_util.c

@@ -689,25 +689,53 @@ int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
 	return ret;
 }
 
-static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
+static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr,
+	uint32_t *cmd_buf_addr_end)
 {
 	long ret = 0;
+	struct cdm_dmi_cmd *p_dmi_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
 
+	p_dmi_cmd = (struct cdm_dmi_cmd *)cmd_buf_addr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
 	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
-	CAM_INFO(CAM_CDM, "DMI");
+
+	if (temp_ptr > cmd_buf_addr_end)
+		CAM_ERR(CAM_CDM,
+			"Invalid cmd start addr:%pK end addr:%pK",
+			temp_ptr, cmd_buf_addr_end);
+
+	CAM_INFO(CAM_CDM,
+		"DMI: LEN: %u DMIAddr: 0x%X DMISel: 0x%X LUT_addr: 0x%X",
+		p_dmi_cmd->length, p_dmi_cmd->DMIAddr,
+		p_dmi_cmd->DMISel, p_dmi_cmd->addr);
 	return ret;
 }
 
-static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
+static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr,
+	uint32_t *cmd_buf_addr_end)
 {
 	long ret = 0;
+	struct cdm_indirect_cmd *p_indirect_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
 
+	p_indirect_cmd = (struct cdm_indirect_cmd *)cmd_buf_addr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
 	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
-	CAM_INFO(CAM_CDM, "Buff Indirect");
+
+	if (temp_ptr > cmd_buf_addr_end)
+		CAM_ERR(CAM_CDM,
+			"Invalid cmd start addr:%pK end addr:%pK",
+			temp_ptr, cmd_buf_addr_end);
+
+	CAM_INFO(CAM_CDM,
+		"Buff Indirect: LEN: %u addr: 0x%X",
+		p_indirect_cmd->length, p_indirect_cmd->addr);
 	return ret;
 }
 
-static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
+static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr,
+	uint32_t *cmd_buf_addr_end)
 {
 	long ret = 0;
 	struct cdm_regcontinuous_cmd *p_regcont_cmd;
@@ -722,6 +750,12 @@ static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
 		p_regcont_cmd->count, p_regcont_cmd->offset);
 
 	for (i = 0; i < p_regcont_cmd->count; i++) {
+		if (temp_ptr > cmd_buf_addr_end) {
+			CAM_ERR(CAM_CDM,
+				"Invalid cmd(%d) start addr:%pK end addr:%pK",
+				i, temp_ptr, cmd_buf_addr_end);
+			break;
+		}
 		CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
 			*temp_ptr);
 		temp_ptr++;
@@ -731,7 +765,8 @@ static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
 	return ret;
 }
 
-static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
+static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr,
+	uint32_t *cmd_buf_addr_end)
 {
 	struct cdm_regrandom_cmd *p_regrand_cmd;
 	uint32_t *temp_ptr = cmd_buf_addr;
@@ -746,6 +781,12 @@ static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
 		p_regrand_cmd->count);
 
 	for (i = 0; i < p_regrand_cmd->count; i++) {
+		if (temp_ptr > cmd_buf_addr_end) {
+			CAM_ERR(CAM_CDM,
+				"Invalid cmd(%d) start addr:%pK end addr:%pK",
+				i, temp_ptr, cmd_buf_addr_end);
+			break;
+		}
 		CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
 			i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
 			*(temp_ptr + 1));
@@ -778,15 +819,22 @@ static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
 	return ret;
 }
 
-static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
+static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr,
+	uint32_t *cmd_buf_addr_end)
 {
 	long ret = 0;
 	struct cdm_changebase_cmd *p_cbase_cmd;
 	uint32_t *temp_ptr = cmd_buf_addr;
 
 	p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
 	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
 
+	if (temp_ptr > cmd_buf_addr_end)
+		CAM_ERR(CAM_CDM,
+			"Invalid cmd start addr:%pK end addr:%pK",
+			temp_ptr, cmd_buf_addr_end);
+
 	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
 		p_cbase_cmd->base);
 
@@ -819,6 +867,7 @@ void cam_cdm_util_dump_cmd_buf(
 	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
 {
 	uint32_t *buf_now = cmd_buf_start;
+	uint32_t *buf_end = cmd_buf_end;
 	uint32_t cmd = 0;
 
 	if (!cmd_buf_start || !cmd_buf_end) {
@@ -834,16 +883,20 @@ void cam_cdm_util_dump_cmd_buf(
 		case CAM_CDM_CMD_DMI:
 		case CAM_CDM_CMD_DMI_32:
 		case CAM_CDM_CMD_DMI_64:
-			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now);
+			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now,
+				buf_end);
 			break;
 		case CAM_CDM_CMD_REG_CONT:
-			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now);
+			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now,
+				buf_end);
 			break;
 		case CAM_CDM_CMD_REG_RANDOM:
-			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now);
+			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now,
+				buf_end);
 			break;
 		case CAM_CDM_CMD_BUFF_INDIRECT:
-			buf_now += cam_cdm_util_dump_buff_indirect(buf_now);
+			buf_now += cam_cdm_util_dump_buff_indirect(buf_now,
+				buf_end);
 			break;
 		case CAM_CDM_CMD_GEN_IRQ:
 			buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now);
@@ -852,7 +905,8 @@ void cam_cdm_util_dump_cmd_buf(
 			buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now);
 			break;
 		case CAM_CDM_CMD_CHANGE_BASE:
-			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now);
+			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now,
+				buf_end);
 			break;
 		case CAM_CDM_CMD_PERF_CTRL:
 			buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now);
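Every dump helper now receives the buffer end pointer so a corrupt command stream cannot be decoded past the end of the allocation. The shared bounds-check pattern, sketched with illustrative names:

#include <linux/printk.h>
#include <linux/types.h>

static long dump_cmd_checked(uint32_t *cmd, uint32_t *cmd_end,
	unsigned int hdr_words)
{
	uint32_t *next = cmd + hdr_words;

	if (next > cmd_end) {
		pr_err("Invalid cmd start addr:%pK end addr:%pK\n",
			next, cmd_end);
		return hdr_words; /* still advance so the walk terminates */
	}

	/* ... decode and print the command's fields here ... */
	return hdr_words;
}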

+ 9 - 2
drivers/cam_cdm/cam_cdm_virtual_core.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -19,6 +19,7 @@
 #include "cam_cdm_core_common.h"
 #include "cam_cdm_soc.h"
 #include "cam_io_util.h"
+#include "cam_req_mgr_workq.h"
 
 #define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
 
@@ -32,6 +33,10 @@ static void cam_virtual_cdm_work(struct work_struct *work)
 	if (payload) {
 		cdm_hw = payload->hw;
 		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		cam_req_mgr_thread_switch_delay_detect(
+			payload->workq_scheduled_ts);
+
 		if (payload->irq_status & 0x2) {
 			struct cam_cdm_bl_cb_request_entry *node;
 
@@ -183,9 +188,11 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 					INIT_WORK((struct work_struct *)
 						&payload->work,
 						cam_virtual_cdm_work);
+					payload->workq_scheduled_ts =
+						ktime_get();
 					queue_work(core->work_queue,
 						&payload->work);
-					}
+				}
 			}
 			core->bl_tag++;
 			CAM_DBG(CAM_CDM,

+ 1 - 1
drivers/cam_core/cam_context_utils.c

@@ -573,7 +573,7 @@ int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
 	req_hdl_param.media_entity_flag = 0;
 	req_hdl_param.priv = ctx;
 	req_hdl_param.ops = ctx->crm_ctx_intf;
-
+	req_hdl_param.dev_id = ctx->dev_id;
 	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
 	if (ctx->dev_hdl <= 0) {
 		rc = -EFAULT;

+ 5 - 4
drivers/cam_core/cam_node.c

@@ -96,8 +96,9 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,
 
 	ctx = cam_node_get_ctxt_from_free_list(node);
 	if (!ctx) {
-		CAM_ERR(CAM_CORE, "No free ctx in free list node %s",
-			node->name);
+		CAM_ERR(CAM_CORE,
+			"No free ctx in free list node %s with size:%d",
+			node->name, node->ctx_size);
 		cam_node_print_ctx_state(node);
 
 		rc = -ENOMEM;
@@ -818,14 +819,14 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
 			rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr);
 			if (rc) {
 				CAM_ERR(CAM_CORE,
-					"acquire device failed(rc = %d)", rc);
+					"acquire hw failed(rc = %d)", rc);
 				goto acquire_kfree;
 			}
 		} else if (api_version == 2) {
 			rc = __cam_node_handle_acquire_hw_v2(node, acquire_ptr);
 			if (rc) {
 				CAM_ERR(CAM_CORE,
-					"acquire device failed(rc = %d)", rc);
+					"acquire hw failed(rc = %d)", rc);
 				goto acquire_kfree;
 			}
 		}

+ 19 - 9
drivers/cam_cpas/cam_cpas_hw.c

@@ -1041,7 +1041,7 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
 {
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	struct cam_cpas_client *cpas_client = NULL;
-	struct cam_axi_vote axi_vote = {0};
+	struct cam_axi_vote *axi_vote = NULL;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
@@ -1051,16 +1051,24 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
 		return -EINVAL;
 	}
 
-	memcpy(&axi_vote, client_axi_vote, sizeof(struct cam_axi_vote));
-
 	if (!CAM_CPAS_CLIENT_VALID(client_indx))
 		return -EINVAL;
 
-	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
-		"Incoming Vote", &axi_vote);
-
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	axi_vote = kmemdup(client_axi_vote, sizeof(struct cam_axi_vote),
+		GFP_KERNEL);
+	if (!axi_vote) {
+		CAM_ERR(CAM_CPAS, "Out of memory");
+		mutex_unlock(&cpas_core->client_mutex[client_indx]);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
+		"Incoming Vote", axi_vote);
+
 	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
@@ -1071,7 +1079,7 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
 		goto unlock_client;
 	}
 
-	rc = cam_cpas_util_translate_client_paths(&axi_vote);
+	rc = cam_cpas_util_translate_client_paths(axi_vote);
 	if (rc) {
 		CAM_ERR(CAM_CPAS,
 			"Unable to translate per path votes rc: %d", rc);
@@ -1079,19 +1087,21 @@ static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
 	}
 
 	cam_cpas_dump_axi_vote_info(cpas_core->cpas_client[client_indx],
-		"Translated Vote", &axi_vote);
+		"Translated Vote", axi_vote);
 
 	/* Log an entry whenever there is an AXI update - before updating */
 	cam_cpas_update_monitor_array(cpas_hw, "CPAS AXI pre-update",
 		client_indx);
 
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
-		cpas_core->cpas_client[client_indx], &axi_vote);
+		cpas_core->cpas_client[client_indx], axi_vote);
 
 	/* Log an entry whenever there is an AXI update - after updating */
 	cam_cpas_update_monitor_array(cpas_hw, "CPAS AXI post-update",
 		client_indx);
 unlock_client:
+	kzfree(axi_vote);
+	axi_vote = NULL;
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
 	mutex_unlock(&cpas_hw->hw_mutex);
 	return rc;
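This hunk is part of the stack-footprint reduction from the commit list: the large struct cam_axi_vote moves from the caller's stack to a heap copy taken under the client mutex. The pattern, sketched without the driver's locking:

#include <linux/slab.h>

static int apply_axi_vote(const struct cam_axi_vote *client_vote)
{
	struct cam_axi_vote *vote;
	int rc = 0;

	vote = kmemdup(client_vote, sizeof(*vote), GFP_KERNEL);
	if (!vote)
		return -ENOMEM;

	/* ... translate per-path votes and apply them ... */

	kfree(vote); /* the driver uses kzfree() to scrub the copy first */
	return rc;
}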

+ 0 - 1
drivers/cam_cpas/cam_cpas_soc.c

@@ -933,7 +933,6 @@ int cam_cpas_get_custom_dt_info(struct cam_hw_info *cpas_hw,
 		CAM_DBG(CAM_CPAS, "RPMH BCM info not available in DT, count=%d",
 			count);
 	}
-
 	return 0;
 
 cleanup_tree:

+ 5 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.c

@@ -30,6 +30,7 @@
 #include "cpastop_v545_100.h"
 #include "cpastop_v570_200.h"
 #include "cpastop_v680_100.h"
+#include "cam_req_mgr_workq.h"
 
 struct cam_camnoc_info *camnoc_info;
 
@@ -571,6 +572,9 @@ static void cam_cpastop_work(struct work_struct *work)
 		return;
 	}
 
+	cam_req_mgr_thread_switch_delay_detect(
+			payload->workq_scheduled_ts);
+
 	cpas_hw = payload->hw;
 	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	soc_info = &cpas_hw->soc_info;
@@ -670,6 +674,7 @@ static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
 
 	cam_cpastop_reset_irq(cpas_hw);
 
+	payload->workq_scheduled_ts = ktime_get();
 	queue_work(cpas_core->work_queue, &payload->work);
 done:
 	atomic_dec(&cpas_core->irq_count);

+ 2 - 0
drivers/cam_cpas/cpas_top/cam_cpastop_hw.h

@@ -341,6 +341,7 @@ struct cam_camnoc_info {
  * @hw: Pointer to HW info
  * @irq_status: IRQ status value
  * @irq_data: IRQ data
+ * @workq_scheduled_ts: workqueue scheduled timestamp
  * @work: Work handle
  *
  */
@@ -348,6 +349,7 @@ struct cam_cpas_work_payload {
 	struct cam_hw_info *hw;
 	uint32_t irq_status;
 	uint32_t irq_data;
+	ktime_t workq_scheduled_ts;
 	struct work_struct work;
 };
 

+ 16 - 1
drivers/cam_isp/cam_isp_context.c

@@ -19,6 +19,7 @@
 #include "cam_cdm_util.h"
 #include "cam_isp_context.h"
 #include "cam_common_util.h"
+#include "cam_req_mgr_debug.h"
 
 static const char isp_dev_name[] = "cam-isp";
 
@@ -1832,6 +1833,12 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
 	CAM_DBG(CAM_ISP, "next Substate[%s]",
 		__cam_isp_ctx_substate_val_to_type(
 		ctx_isp->substate_activated));
+
+	cam_req_mgr_debug_delay_detect();
+	trace_cam_delay_detect("ISP",
+		"bubble epoch_in_applied", req->request_id,
+		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
+		CAM_DEFAULT_VALUE);
 end:
 	if (request_id == 0) {
 		req = list_last_entry(&ctx->active_req_list,
@@ -2048,6 +2055,13 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
 	CAM_DBG(CAM_ISP, "next Substate[%s]",
 		__cam_isp_ctx_substate_val_to_type(
 		ctx_isp->substate_activated));
+
+	cam_req_mgr_debug_delay_detect();
+	trace_cam_delay_detect("ISP",
+		"bubble epoch_in_bubble_applied",
+		req->request_id, ctx->ctx_id,
+		ctx->link_hdl, ctx->session_hdl,
+		CAM_DEFAULT_VALUE);
 end:
 	req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request,
 		list);
@@ -4223,6 +4237,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
 	req_isp->num_acked = 0;
 	req_isp->bubble_detected = false;
+	req_isp->hw_update_data.packet = packet;
 
 	for (i = 0; i < req_isp->num_fence_map_out; i++) {
 		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
@@ -4444,7 +4459,7 @@ get_dev_handle:
 	req_hdl_param.media_entity_flag = 0;
 	req_hdl_param.ops = ctx->crm_ctx_intf;
 	req_hdl_param.priv = ctx;
-
+	req_hdl_param.dev_id = CAM_ISP;
 	CAM_DBG(CAM_ISP, "get device handle form bridge");
 	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
 	if (ctx->dev_hdl <= 0) {

+ 45 - 11
drivers/cam_isp/cam_isp_dev.c

@@ -96,7 +96,6 @@ static int cam_isp_dev_component_bind(struct device *dev,
 	struct cam_hw_mgr_intf         hw_mgr_intf;
 	struct cam_node               *node;
 	const char                    *compat_str = NULL;
-	uint32_t                       isp_device_type;
 	struct platform_device *pdev = to_platform_device(dev);
 
 	int iommu_hdl = -1;
@@ -109,11 +108,13 @@ static int cam_isp_dev_component_bind(struct device *dev,
 	if (strnstr(compat_str, "ife", strlen(compat_str))) {
 		rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
 		CAM_IFE_DEVICE_TYPE);
-		isp_device_type = CAM_IFE_DEVICE_TYPE;
+		g_isp_dev.isp_device_type = CAM_IFE_DEVICE_TYPE;
+		g_isp_dev.max_context = CAM_IFE_CTX_MAX;
 	} else if (strnstr(compat_str, "tfe", strlen(compat_str))) {
 		rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
 		CAM_TFE_DEVICE_TYPE);
-		isp_device_type = CAM_TFE_DEVICE_TYPE;
+		g_isp_dev.isp_device_type = CAM_TFE_DEVICE_TYPE;
+		g_isp_dev.max_context = CAM_TFE_CTX_MAX;
 	} else  {
 		CAM_ERR(CAM_ISP, "Invalid ISP hw type %s", compat_str);
 		rc = -EINVAL;
@@ -127,30 +128,51 @@ static int cam_isp_dev_component_bind(struct device *dev,
 	node = (struct cam_node *) g_isp_dev.sd.token;
 
 	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
+	g_isp_dev.ctx = kcalloc(g_isp_dev.max_context,
+		sizeof(struct cam_context),
+		GFP_KERNEL);
+	if (!g_isp_dev.ctx) {
+		CAM_ERR(CAM_ISP,
+			"Mem Allocation failed for ISP base context");
+		goto unregister;
+	}
+
+	g_isp_dev.ctx_isp = kcalloc(g_isp_dev.max_context,
+		sizeof(struct cam_isp_context),
+		GFP_KERNEL);
+	if (!g_isp_dev.ctx_isp) {
+		CAM_ERR(CAM_ISP,
+			"Mem Allocation failed for Isp private context");
+		kfree(g_isp_dev.ctx);
+		g_isp_dev.ctx = NULL;
+		goto unregister;
+	}
+
 	rc = cam_isp_hw_mgr_init(compat_str, &hw_mgr_intf, &iommu_hdl);
 	if (rc != 0) {
 		CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!");
-		goto unregister;
+		goto kfree;
 	}
 
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < g_isp_dev.max_context; i++) {
 		rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
 			&g_isp_dev.ctx[i],
 			&node->crm_node_intf,
 			&node->hw_mgr_intf,
 			i,
-			isp_device_type);
+			g_isp_dev.isp_device_type);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "ISP context init failed!");
-			goto unregister;
+			goto kfree;
 		}
 	}
 
-	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
-		CAM_ISP_DEV_NAME);
+	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx,
+			g_isp_dev.max_context, CAM_ISP_DEV_NAME);
+
 	if (rc) {
 		CAM_ERR(CAM_ISP, "ISP node init failed!");
-		goto unregister;
+		goto kfree;
 	}
 
 	cam_smmu_set_client_page_fault_handler(iommu_hdl,
@@ -161,6 +183,13 @@ static int cam_isp_dev_component_bind(struct device *dev,
 	CAM_DBG(CAM_ISP, "Component bound successfully");
 
 	return 0;
+
+kfree:
+	kfree(g_isp_dev.ctx);
+	g_isp_dev.ctx = NULL;
+	kfree(g_isp_dev.ctx_isp);
+	g_isp_dev.ctx_isp = NULL;
+
 unregister:
 	rc = cam_subdev_remove(&g_isp_dev.sd);
 err:
@@ -180,13 +209,18 @@ static void cam_isp_dev_component_unbind(struct device *dev,
 
 	cam_isp_hw_mgr_deinit(compat_str);
 	/* clean up resources */
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < g_isp_dev.max_context; i++) {
 		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
 		if (rc)
 			CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
 				 i);
 	}
 
+	kfree(g_isp_dev.ctx);
+	g_isp_dev.ctx = NULL;
+	kfree(g_isp_dev.ctx_isp);
+	g_isp_dev.ctx_isp = NULL;
+
 	rc = cam_subdev_remove(&g_isp_dev.sd);
 	if (rc)
 		CAM_ERR(CAM_ISP, "Unregister failed rc: %d", rc);
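Context storage moves from fixed CAM_CTX_MAX arrays to bind-time allocations sized per device type (the "Max context reduction for TFE" change). A sketch of the allocation and its unwind path, as an illustrative helper rather than a function in the driver:

#include <linux/slab.h>

static int cam_isp_alloc_ctx(struct cam_isp_dev *isp_dev, int32_t max_context)
{
	isp_dev->ctx = kcalloc(max_context, sizeof(*isp_dev->ctx), GFP_KERNEL);
	if (!isp_dev->ctx)
		return -ENOMEM;

	isp_dev->ctx_isp = kcalloc(max_context, sizeof(*isp_dev->ctx_isp),
		GFP_KERNEL);
	if (!isp_dev->ctx_isp) {
		kfree(isp_dev->ctx);
		isp_dev->ctx = NULL;
		return -ENOMEM;
	}

	isp_dev->max_context = max_context;
	return 0;
}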

+ 7 - 3
drivers/cam_isp/cam_isp_dev.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_ISP_DEV_H_
@@ -19,13 +19,17 @@
  * @ctx_isp:               Isp private context storage
  * @isp_mutex:             ISP dev mutex
  * @open_cnt:              Open device count
+ * @isp_device_type:       ISP device type
+ * @max_context:           Maximum number of contexts (4 for TFE, 8 for IFE)
  */
 struct cam_isp_dev {
 	struct cam_subdev          sd;
-	struct cam_context         ctx[CAM_CTX_MAX];
-	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+	struct cam_context         *ctx;
+	struct cam_isp_context     *ctx_isp;
 	struct mutex               isp_mutex;
 	int32_t                    open_cnt;
+	uint32_t                   isp_device_type;
+	int32_t                    max_context;
 };
 
 /**

+ 4 - 4
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -7401,7 +7401,7 @@ static int  cam_ife_hw_mgr_find_affected_ctx(
 		/* Add affected_context in list of recovery data */
 		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
 			ife_hwr_mgr_ctx->ctx_index);
-		if (recovery_data->no_of_context < CAM_CTX_MAX)
+		if (recovery_data->no_of_context < CAM_IFE_CTX_MAX)
 			recovery_data->affected_ctx[
 				recovery_data->no_of_context++] =
 				ife_hwr_mgr_ctx;
@@ -8141,7 +8141,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	}
 
 	atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < CAM_IFE_CTX_MAX; i++) {
 		memset(&g_ife_hw_mgr.ctx_pool[i], 0,
 			sizeof(g_ife_hw_mgr.ctx_pool[i]));
 		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list);
@@ -8224,7 +8224,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	return 0;
 end:
 	if (rc) {
-		for (i = 0; i < CAM_CTX_MAX; i++) {
+		for (i = 0; i < CAM_IFE_CTX_MAX; i++) {
 			cam_tasklet_deinit(
 				&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
 			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
@@ -8249,7 +8249,7 @@ void cam_ife_hw_mgr_deinit(void)
 	debugfs_remove_recursive(g_ife_hw_mgr.debug_cfg.dentry);
 	g_ife_hw_mgr.debug_cfg.dentry = NULL;
 
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < CAM_IFE_CTX_MAX; i++) {
 		cam_tasklet_deinit(
 			&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
 		kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);

+ 2 - 2
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h

@@ -194,7 +194,7 @@ struct cam_ife_hw_mgr {
 	atomic_t                       active_ctx_cnt;
 	struct list_head               free_ctx_list;
 	struct list_head               used_ctx_list;
-	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_IFE_CTX_MAX];
 
 	struct cam_ife_csid_hw_caps    ife_csid_dev_caps[
 						CAM_IFE_CSID_HW_NUM_MAX];
@@ -218,7 +218,7 @@ struct cam_ife_hw_mgr {
 struct cam_ife_hw_event_recovery_data {
 	uint32_t                   error_type;
 	uint32_t                   affected_core[CAM_ISP_HW_NUM_MAX];
-	struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_IFE_CTX_MAX];
 	uint32_t                   no_of_context;
 };
 

+ 261 - 49
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -23,9 +23,12 @@
 #include "cam_mem_mgr_api.h"
 #include "cam_common_util.h"
 #include "cam_compat.h"
+#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
 
 #define CAM_TFE_HW_ENTRIES_MAX  20
 #define CAM_TFE_HW_CONFIG_TIMEOUT 60
+#define CAM_TFE_HW_CONFIG_WAIT_MAX_TRY  3
 
 #define TZ_SVC_SMMU_PROGRAM 0x15
 #define TZ_SAFE_SYSCALL_ID  0x3
@@ -1497,6 +1500,8 @@ static int cam_tfe_hw_mgr_acquire_res_tfe_csid_rdi(
 		csid_acquire.out_port = out_port;
 		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
 		csid_acquire.node_res = NULL;
+		csid_acquire.event_cb = cam_tfe_hw_mgr_event_handler;
+		csid_acquire.event_cb_prv = tfe_ctx;
 
 		if (tfe_ctx->is_tpg) {
 			if (tfe_ctx->res_list_tpg.hw_res[0]->hw_intf->hw_idx ==
@@ -1747,16 +1752,21 @@ void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata,
 {
 	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
 	struct cam_tfe_hw_mgr_ctx *ctx = NULL;
+	uint32_t *buf_start, *buf_end;
+	int i, rc = 0;
+	size_t len = 0;
+	uint32_t *buf_addr;
 
 	if (!userdata) {
 		CAM_ERR(CAM_ISP, "Invalid args");
 		return;
 	}
 
-	hw_update_data = (struct cam_isp_prepare_hw_update_data *)userdata;
-	ctx = (struct cam_tfe_hw_mgr_ctx *)hw_update_data->isp_mgr_ctx;
-
 	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		hw_update_data =
+			(struct cam_isp_prepare_hw_update_data *)userdata;
+		ctx =
+		(struct cam_tfe_hw_mgr_ctx *)hw_update_data->isp_mgr_ctx;
 		complete_all(&ctx->config_done_complete);
 		atomic_set(&ctx->cdm_done, 1);
 		if (g_tfe_hw_mgr.debug_cfg.per_req_reg_dump)
@@ -1768,6 +1778,40 @@ void cam_tfe_cam_cdm_callback(uint32_t handle, void *userdata,
 		CAM_DBG(CAM_ISP,
 			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu ctx_index=%d",
 			 handle, userdata, status, cookie, ctx->ctx_index);
+	} else if (status == CAM_CDM_CB_STATUS_PAGEFAULT ||
+		status == CAM_CDM_CB_STATUS_INVALID_BL_CMD ||
+		status == CAM_CDM_CB_STATUS_HW_ERROR) {
+		ctx = userdata;
+		CAM_INFO(CAM_ISP,
+			"req_id =%d ctx_id =%d Bl_cmd_count =%d status=%d",
+			ctx->applied_req_id, ctx->ctx_index,
+			ctx->last_submit_bl_cmd.bl_count, status);
+
+		for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
+			CAM_INFO(CAM_ISP,
+				"BL(%d) hdl=0x%x addr=0x%x len=%d input_len =%d offset=0x%x type=%d",
+				i, ctx->last_submit_bl_cmd.cmd[i].mem_handle,
+				ctx->last_submit_bl_cmd.cmd[i].hw_addr,
+				ctx->last_submit_bl_cmd.cmd[i].len,
+				ctx->last_submit_bl_cmd.cmd[i].input_len,
+				ctx->last_submit_bl_cmd.cmd[i].offset,
+				ctx->last_submit_bl_cmd.cmd[i].type);
+
+			rc = cam_packet_util_get_cmd_mem_addr(
+				ctx->last_submit_bl_cmd.cmd[i].mem_handle,
+				&buf_addr, &len);
+
+			buf_start = (uint32_t *)((uint8_t *) buf_addr +
+				ctx->last_submit_bl_cmd.cmd[i].offset);
+			buf_end = (uint32_t *)((uint8_t *) buf_start +
+				ctx->last_submit_bl_cmd.cmd[i].input_len - 1);
+
+			cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
+		}
+		if (ctx->packet != NULL)
+			cam_packet_dump_patch_info(ctx->packet,
+				g_tfe_hw_mgr.mgr_common.img_iommu_hdl,
+				g_tfe_hw_mgr.mgr_common.img_iommu_hdl_secure);
 	} else {
 		CAM_WARN(CAM_ISP,
 			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
@@ -2311,7 +2355,7 @@ static int cam_isp_tfe_blob_bw_update(
 {
 	struct cam_isp_hw_mgr_res             *hw_mgr_res;
 	struct cam_hw_intf                    *hw_intf;
-	struct cam_tfe_bw_update_args          bw_upd_args;
+	struct cam_tfe_bw_update_args          *bw_upd_args = NULL;
 	int                                    rc = -EINVAL;
 	uint32_t                               i, split_idx;
 	bool                                   camif_l_bw_updated = false;
@@ -2332,32 +2376,38 @@ static int cam_isp_tfe_blob_bw_update(
 			bw_config->axi_path[i].mnoc_ib_bw);
 	}
 
+	bw_upd_args = kzalloc(sizeof(struct cam_tfe_bw_update_args),
+		GFP_KERNEL);
+	if (!bw_upd_args) {
+		CAM_ERR(CAM_ISP, "Out of memory");
+		return -ENOMEM;
+	}
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_in, list) {
 		for (split_idx = 0; split_idx < CAM_ISP_HW_SPLIT_MAX;
 			split_idx++) {
 			if (!hw_mgr_res->hw_res[split_idx])
 				continue;
 
-			memset(&bw_upd_args.isp_vote, 0,
+			memset(&bw_upd_args->isp_vote, 0,
 				sizeof(struct cam_axi_vote));
 			rc = cam_tfe_classify_vote_info(hw_mgr_res, bw_config,
-				&bw_upd_args.isp_vote, split_idx,
+				&bw_upd_args->isp_vote, split_idx,
 				&camif_l_bw_updated, &camif_r_bw_updated);
 			if (rc)
-				return rc;
+				goto end;
 
-			if (!bw_upd_args.isp_vote.num_paths)
+			if (!bw_upd_args->isp_vote.num_paths)
 				continue;
 
 			hw_intf = hw_mgr_res->hw_res[split_idx]->hw_intf;
 			if (hw_intf && hw_intf->hw_ops.process_cmd) {
-				bw_upd_args.node_res =
+				bw_upd_args->node_res =
 					hw_mgr_res->hw_res[split_idx];
 
 				rc = hw_intf->hw_ops.process_cmd(
 					hw_intf->hw_priv,
 					CAM_ISP_HW_CMD_BW_UPDATE_V2,
-					&bw_upd_args,
+					bw_upd_args,
 					sizeof(
 					struct cam_tfe_bw_update_args));
 				if (rc)
@@ -2369,6 +2419,9 @@ static int cam_isp_tfe_blob_bw_update(
 		}
 	}
 
+end:
+	kzfree(bw_upd_args);
+	bw_upd_args = NULL;
 	return rc;
 }
 
@@ -2430,21 +2483,28 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
 		"Enter ctx id:%d num_hw_upd_entries %d request id: %llu",
 		ctx->ctx_index, cfg->num_hw_update_entries, cfg->request_id);
 
-	if (cfg->num_hw_update_entries > 0) {
-		cdm_cmd                       = ctx->cdm_cmd;
-		cdm_cmd->cmd_arrary_count     = cfg->num_hw_update_entries;
-		cdm_cmd->type                 = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
-		cdm_cmd->flag                 = true;
-		cdm_cmd->userdata             = hw_update_data;
-		cdm_cmd->cookie               = cfg->request_id;
-		cdm_cmd->gen_irq_arb          = false;
-
-		for (i = 0; i < cfg->num_hw_update_entries; i++) {
-			cmd = cfg->hw_update_entries + i;
-			if (cfg->reapply && cmd->flags == CAM_ISP_IQ_BL) {
-				skip++;
-				continue;
-			}
+	if (cfg->num_hw_update_entries <= 0) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Enter ctx id:%d no valid hw entries:%d request id: %llu",
+			ctx->ctx_index, cfg->num_hw_update_entries,
+			cfg->request_id);
+		goto end;
+	}
+
+	cdm_cmd                       = ctx->cdm_cmd;
+	cdm_cmd->cmd_arrary_count     = cfg->num_hw_update_entries;
+	cdm_cmd->type                 = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+	cdm_cmd->flag                 = true;
+	cdm_cmd->userdata             = hw_update_data;
+	cdm_cmd->cookie               = cfg->request_id;
+	cdm_cmd->gen_irq_arb          = false;
+
+	for (i = 0; i < cfg->num_hw_update_entries; i++) {
+		cmd = (cfg->hw_update_entries + i);
+		if (cfg->reapply && cmd->flags == CAM_ISP_IQ_BL) {
+			skip++;
+			continue;
+		}
 
 			if (cmd->flags == CAM_ISP_UNUSED_BL ||
 				cmd->flags >= CAM_ISP_BL_MAX)
@@ -2471,29 +2531,99 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
 			return rc;
 		}
 
-		if (cfg->init_packet) {
-			rc = wait_for_completion_timeout(
-				&ctx->config_done_complete,
-				msecs_to_jiffies(CAM_TFE_HW_CONFIG_TIMEOUT));
-			if (rc <= 0) {
+	ctx->packet = (struct cam_packet *)hw_update_data->packet;
+	ctx->last_submit_bl_cmd.bl_count = cdm_cmd->cmd_arrary_count;
+
+	for (i = 0; i < cdm_cmd->cmd_arrary_count; i++) {
+		if (cdm_cmd->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			ctx->last_submit_bl_cmd.cmd[i].mem_handle =
+				cdm_cmd->cmd[i].bl_addr.mem_handle;
+
+			rc = cam_mem_get_io_buf(
+			cdm_cmd->cmd[i].bl_addr.mem_handle,
+			g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl,
+			&ctx->last_submit_bl_cmd.cmd[i].hw_addr,
+			&ctx->last_submit_bl_cmd.cmd[i].len);
+		} else if (cdm_cmd->type ==
+			CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				CAM_ERR(CAM_CDM,
+					"Submitted Hw bl hw_iova is invalid %d:%d",
+					i, cdm_cmd->cmd_arrary_count);
+				rc = -EINVAL;
+				break;
+			}
+			rc = 0;
+			ctx->last_submit_bl_cmd.cmd[i].hw_addr =
+			(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			ctx->last_submit_bl_cmd.cmd[i].len =
+			cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+			ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0;
+		} else
+			CAM_INFO(CAM_ISP,
+				"submitted invalid bl cmd addr type :%d for Bl(%d)",
+				cdm_cmd->type, i);
+
+		ctx->last_submit_bl_cmd.cmd[i].offset =
+			cdm_cmd->cmd[i].offset;
+		ctx->last_submit_bl_cmd.cmd[i].type =
+			cdm_cmd->type;
+		ctx->last_submit_bl_cmd.cmd[i].input_len =
+		 cdm_cmd->cmd[i].len;
+	}
+
+	if (!cfg->init_packet)
+		goto end;
+
+	for (i = 0; i < CAM_TFE_HW_CONFIG_WAIT_MAX_TRY; i++) {
+		rc = wait_for_completion_timeout(
+			&ctx->config_done_complete,
+			msecs_to_jiffies(
+			CAM_TFE_HW_CONFIG_TIMEOUT));
+		if (rc <= 0) {
+			if (!cam_cdm_detect_hang_error(ctx->cdm_handle)) {
 				CAM_ERR(CAM_ISP,
-					"config done completion timeout for req_id=%llu rc=%d ctx_index %d",
+					"CDM workqueue delay detected, wait for some more time req_id=%llu rc=%d ctx_index %d",
 					cfg->request_id, rc,
 					ctx->ctx_index);
-				if (rc == 0)
-					rc = -ETIMEDOUT;
-			} else {
-				rc = 0;
-				CAM_DBG(CAM_ISP,
-					"config done Success for req_id=%llu ctx_index %d",
-					cfg->request_id,
-					ctx->ctx_index);
+				cam_req_mgr_debug_delay_detect();
+				trace_cam_delay_detect("CDM",
+					"CDM workqueue delay detected",
+					cfg->request_id, ctx->ctx_index,
+					CAM_DEFAULT_VALUE,
+					CAM_DEFAULT_VALUE, rc);
+				continue;
 			}
+
+			CAM_ERR(CAM_ISP,
+				"config done completion timeout for req_id=%llu rc=%d ctx_index %d",
+				cfg->request_id, rc,
+				ctx->ctx_index);
+
+			cam_req_mgr_debug_delay_detect();
+			trace_cam_delay_detect("ISP",
+				"config done completion timeout",
+				cfg->request_id, ctx->ctx_index,
+				CAM_DEFAULT_VALUE, CAM_DEFAULT_VALUE,
+				rc);
+
+			if (rc == 0)
+				rc = -ETIMEDOUT;
+
+			goto end;
+		} else {
+			rc = 0;
+			CAM_DBG(CAM_ISP,
+				"config done Success for req_id=%llu ctx_index %d",
+				cfg->request_id, ctx->ctx_index);
+			break;
 		}
-	} else {
-		CAM_ERR(CAM_ISP, "No commands to config");
 	}
 
+	if ((i == CAM_TFE_HW_CONFIG_WAIT_MAX_TRY) && (rc == 0))
+		rc = -ETIMEDOUT;
+
+end:
 	CAM_DBG(CAM_ISP, "Exit: Config Done: %llu",  cfg->request_id);
 
 	return rc;
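Instead of a single 60 ms wait for config-done, the code above re-arms the wait up to CAM_TFE_HW_CONFIG_WAIT_MAX_TRY times whenever CDM hang detection reports the workqueue as merely delayed. A condensed sketch of the loop (names mirror the driver loosely):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define CONFIG_TIMEOUT_MS  60 /* CAM_TFE_HW_CONFIG_TIMEOUT */
#define CONFIG_MAX_TRY      3 /* CAM_TFE_HW_CONFIG_WAIT_MAX_TRY */

static int wait_config_done(struct completion *done,
	bool (*workq_delayed)(void))
{
	unsigned long rem;
	int i;

	for (i = 0; i < CONFIG_MAX_TRY; i++) {
		rem = wait_for_completion_timeout(done,
			msecs_to_jiffies(CONFIG_TIMEOUT_MS));
		if (rem)
			return 0;          /* config completed */
		if (!workq_delayed())
			return -ETIMEDOUT; /* genuine timeout */
		/* workqueue delay detected: grant one more window */
	}

	return -ETIMEDOUT;
}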
@@ -2734,6 +2864,17 @@ static int cam_tfe_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 	atomic_dec_return(&g_tfe_hw_mgr.active_ctx_cnt);
 	mutex_unlock(&g_tfe_hw_mgr.ctx_mutex);
 
+	for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
+		ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0;
+		ctx->last_submit_bl_cmd.cmd[i].hw_addr = 0;
+		ctx->last_submit_bl_cmd.cmd[i].len = 0;
+		ctx->last_submit_bl_cmd.cmd[i].offset = 0;
+		ctx->last_submit_bl_cmd.cmd[i].type = 0;
+		ctx->last_submit_bl_cmd.cmd[i].input_len = 0;
+	}
+	ctx->last_submit_bl_cmd.bl_count = 0;
+	ctx->packet = NULL;
+
 end:
 	return rc;
 }
@@ -3288,6 +3429,18 @@ static int cam_tfe_mgr_release_hw(void *hw_mgr_priv,
 	ctx->num_reg_dump_buf = 0;
 	ctx->res_list_tpg.res_type = CAM_ISP_RESOURCE_MAX;
 	atomic_set(&ctx->overflow_pending, 0);
+
+	for (i = 0; i < ctx->last_submit_bl_cmd.bl_count; i++) {
+		ctx->last_submit_bl_cmd.cmd[i].mem_handle = 0;
+		ctx->last_submit_bl_cmd.cmd[i].hw_addr = 0;
+		ctx->last_submit_bl_cmd.cmd[i].len = 0;
+		ctx->last_submit_bl_cmd.cmd[i].offset = 0;
+		ctx->last_submit_bl_cmd.cmd[i].type = 0;
+		ctx->last_submit_bl_cmd.cmd[i].input_len = 0;
+	}
+	ctx->last_submit_bl_cmd.bl_count = 0;
+	ctx->packet = NULL;
+
 	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
 		ctx->sof_cnt[i] = 0;
 		ctx->eof_cnt[i] = 0;
@@ -4987,13 +5140,19 @@ static int  cam_tfe_hw_mgr_find_affected_ctx(
 			affected_core, CAM_TFE_HW_NUM_MAX))
 			continue;
 
+		if (atomic_read(&tfe_hwr_mgr_ctx->overflow_pending)) {
+			CAM_INFO(CAM_ISP, "CTX:%d already error reported",
+				tfe_hwr_mgr_ctx->ctx_index);
+			continue;
+		}
+
 		atomic_set(&tfe_hwr_mgr_ctx->overflow_pending, 1);
 		notify_err_cb = tfe_hwr_mgr_ctx->common.event_cb[event_type];
 
 		/* Add affected_context in list of recovery data */
 		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
 			tfe_hwr_mgr_ctx->ctx_index);
-		if (recovery_data->no_of_context < CAM_CTX_MAX)
+		if (recovery_data->no_of_context < CAM_TFE_CTX_MAX)
 			recovery_data->affected_ctx[
 				recovery_data->no_of_context++] =
 				tfe_hwr_mgr_ctx;
@@ -5002,8 +5161,13 @@ static int  cam_tfe_hw_mgr_find_affected_ctx(
 		 * In the call back function corresponding ISP context
 		 * will update CRM about fatal Error
 		 */
-		notify_err_cb(tfe_hwr_mgr_ctx->common.cb_priv,
+		if (notify_err_cb) {
+			notify_err_cb(tfe_hwr_mgr_ctx->common.cb_priv,
 			CAM_ISP_HW_EVENT_ERROR, error_event_data);
+		} else {
+			CAM_WARN(CAM_ISP, "Error call back is not set");
+			goto end;
+		}
 	}
 
 	/* fill the affected_core in recovery data */
@@ -5012,7 +5176,34 @@ static int  cam_tfe_hw_mgr_find_affected_ctx(
 		CAM_DBG(CAM_ISP, "tfe core %d is affected (%d)",
 			 i, recovery_data->affected_core[i]);
 	}
+end:
+	return 0;
+}
+
+static int cam_tfe_hw_mgr_handle_csid_event(
+	struct cam_isp_hw_event_info *event_info)
+{
+	struct cam_isp_hw_error_event_data  error_event_data = {0};
+	struct cam_tfe_hw_event_recovery_data     recovery_data = {0};
+
+	/* this can be extended based on the types of error
+	 * received from CSID
+	 */
+	switch (event_info->err_type) {
+	case CAM_ISP_HW_ERROR_CSID_FATAL: {
+
+		if (!g_tfe_hw_mgr.debug_cfg.enable_csid_recovery)
+			break;
 
+		error_event_data.error_type = event_info->err_type;
+		cam_tfe_hw_mgr_find_affected_ctx(&error_event_data,
+			event_info->hw_idx,
+			&recovery_data);
+		break;
+	}
+	default:
+		break;
+	}
 	return 0;
 }
 
@@ -5033,6 +5224,13 @@ static int cam_tfe_hw_mgr_handle_hw_err(
 	else if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT)
 		error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
 
+	spin_lock(&g_tfe_hw_mgr.ctx_lock);
+	if (event_info->err_type == CAM_ISP_HW_ERROR_CSID_FATAL) {
+		rc = cam_tfe_hw_mgr_handle_csid_event(event_info);
+		spin_unlock(&g_tfe_hw_mgr.ctx_lock);
+		return rc;
+	}
+
 	core_idx = event_info->hw_idx;
 
 	if (g_tfe_hw_mgr.debug_cfg.enable_recovery)
@@ -5042,9 +5240,13 @@ static int cam_tfe_hw_mgr_handle_hw_err(
 
 	rc = cam_tfe_hw_mgr_find_affected_ctx(&error_event_data,
 		core_idx, &recovery_data);
+	if (rc || !(recovery_data.no_of_context))
+		goto end;
 
-	if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT)
+	if (event_info->res_type == CAM_ISP_RESOURCE_TFE_OUT) {
+		spin_unlock(&g_tfe_hw_mgr.ctx_lock);
 		return rc;
+	}
 
 	if (g_tfe_hw_mgr.debug_cfg.enable_recovery) {
 		/* Trigger for recovery */
@@ -5057,7 +5259,8 @@ static int cam_tfe_hw_mgr_handle_hw_err(
 		CAM_DBG(CAM_ISP, "recovery is not enabled");
 		rc = 0;
 	}
-
+end:
+	spin_unlock(&g_tfe_hw_mgr.ctx_lock);
 	return rc;
 }
 
@@ -5169,8 +5372,13 @@ static int cam_tfe_hw_mgr_check_irq_for_dual_tfe(
 			tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt++;
 		}
 
-		if (tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt == 1)
+		if (tfe_hw_mgr_ctx->dual_tfe_irq_mismatch_cnt == 1) {
 			cam_tfe_mgr_ctx_irq_dump(tfe_hw_mgr_ctx);
+			trace_cam_delay_detect("ISP", "dual tfe irq mismatch",
+				CAM_DEFAULT_VALUE, tfe_hw_mgr_ctx->ctx_index,
+				CAM_DEFAULT_VALUE, CAM_DEFAULT_VALUE,
+				rc);
+		}
 		rc = 0;
 	}
 
@@ -5499,6 +5707,9 @@ static int cam_tfe_hw_mgr_debug_register(void)
 	dbgfileptr = debugfs_create_u32("enable_reg_dump", 0644,
 		g_tfe_hw_mgr.debug_cfg.dentry,
 		&g_tfe_hw_mgr.debug_cfg.enable_reg_dump);
+	dbgfileptr = debugfs_create_u32("enable_csid_recovery", 0644,
+		g_tfe_hw_mgr.debug_cfg.dentry,
+		&g_tfe_hw_mgr.debug_cfg.enable_csid_recovery);
 	dbgfileptr = debugfs_create_file("tfe_camif_debug", 0644,
 		g_tfe_hw_mgr.debug_cfg.dentry, NULL, &cam_tfe_camif_debug);
 	dbgfileptr = debugfs_create_u32("per_req_reg_dump", 0644,
@@ -5542,6 +5753,7 @@ int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	memset(&g_tfe_hw_mgr, 0, sizeof(g_tfe_hw_mgr));
 
 	mutex_init(&g_tfe_hw_mgr.ctx_mutex);
+	spin_lock_init(&g_tfe_hw_mgr.ctx_lock);
 
 	if (CAM_TFE_HW_NUM_MAX != CAM_TFE_CSID_HW_NUM_MAX) {
 		CAM_ERR(CAM_ISP, "CSID num is different than TFE num");
@@ -5645,7 +5857,7 @@ int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	}
 
 	atomic_set(&g_tfe_hw_mgr.active_ctx_cnt, 0);
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < CAM_TFE_CTX_MAX; i++) {
 		memset(&g_tfe_hw_mgr.ctx_pool[i], 0,
 			sizeof(g_tfe_hw_mgr.ctx_pool[i]));
 		INIT_LIST_HEAD(&g_tfe_hw_mgr.ctx_pool[i].list);
@@ -5724,7 +5936,7 @@ int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	return 0;
 end:
 	if (rc) {
-		for (i = 0; i < CAM_CTX_MAX; i++) {
+		for (i = 0; i < CAM_TFE_CTX_MAX; i++) {
 			cam_tasklet_deinit(
 				&g_tfe_hw_mgr.mgr_common.tasklet_pool[i]);
 			kfree(g_tfe_hw_mgr.ctx_pool[i].cdm_cmd);
@@ -5749,7 +5961,7 @@ void cam_tfe_hw_mgr_deinit(void)
 	debugfs_remove_recursive(g_tfe_hw_mgr.debug_cfg.dentry);
 	g_tfe_hw_mgr.debug_cfg.dentry = NULL;
 
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < CAM_TFE_CTX_MAX; i++) {
 		cam_tasklet_deinit(
 			&g_tfe_hw_mgr.mgr_common.tasklet_pool[i]);
 		kfree(g_tfe_hw_mgr.ctx_pool[i].cdm_cmd);
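Together with the lock, cam_tfe_hw_mgr_find_affected_ctx now skips any context whose overflow_pending flag is already set, so an error burst reports each context at most once and the recovery list stays bounded by CAM_TFE_CTX_MAX. A minimal sketch of that report-once pattern; the driver does the read and the set as separate steps under its spinlock, while this lock-free variant uses a single atomic exchange for the same effect (all names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    #define TFE_CTX_MAX 4

    struct ctx {
            int idx;
            atomic_int overflow_pending;
    };

    /* Report each affected context at most once; later IRQs for the same
     * context are ignored until recovery clears the flag. */
    static void find_affected(struct ctx *pool, int n,
                              struct ctx **affected, int *count)
    {
            for (int i = 0; i < n; i++) {
                    /* atomic exchange: only the first caller sees 0 */
                    if (atomic_exchange(&pool[i].overflow_pending, 1))
                            continue;
                    if (*count < TFE_CTX_MAX)
                            affected[(*count)++] = &pool[i];
            }
    }

    int main(void)
    {
            struct ctx pool[TFE_CTX_MAX] = { {0}, {1}, {2}, {3} };
            struct ctx *affected[TFE_CTX_MAX];
            int count = 0;

            find_affected(pool, TFE_CTX_MAX, affected, &count);
            find_affected(pool, TFE_CTX_MAX, affected, &count);
            printf("reported %d contexts\n", count);  /* 4, not 8 */
            return 0;
    }

Clearing overflow_pending after recovery completes re-arms reporting for the next error.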

+ 11 - 2
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.h

@@ -13,6 +13,7 @@
 #include "cam_tfe_csid_hw_intf.h"
 #include "cam_top_tpg_hw_intf.h"
 #include "cam_tasklet_util.h"
+#include "cam_cdm_intf_api.h"
 
 
 
@@ -27,6 +28,7 @@
  * @dentry:                    Debugfs entry
  * @csid_debug:                csid debug information
  * @enable_recovery:           enable recovery
+ * @enable_csid_recovery:      enable csid recovery
  * @camif_debug:               enable sensor diagnosis status
  * @enable_reg_dump:           enable reg dump on error;
  * @per_req_reg_dump:          Enable per request reg dump
@@ -36,6 +38,7 @@ struct cam_tfe_hw_mgr_debug {
 	struct dentry  *dentry;
 	uint64_t       csid_debug;
 	uint32_t       enable_recovery;
+	uint32_t       enable_csid_recovery;
 	uint32_t       camif_debug;
 	uint32_t       enable_reg_dump;
 	uint32_t       per_req_reg_dump;
@@ -61,6 +64,7 @@ struct cam_tfe_hw_mgr_debug {
  * @cdm_ops                   cdm util operation pointer for building
  *                            cdm commands
  * @cdm_cmd                   cdm base and length request pointer
+ * @last_submit_bl_cmd        last submitted CDM BL command data
  * @config_done_complete      indicator for configuration complete
  * @sof_cnt                   sof count value per core, used for dual TFE
  * @epoch_cnt                 epoch count value per core, used for dual TFE
@@ -82,6 +86,7 @@ struct cam_tfe_hw_mgr_debug {
  * @slave_hw_idx              slave hardware index in dual tfe case
  * @dual_tfe_irq_mismatch_cnt irq mismatch count value per core, used for
  *                              dual TFE
+ * @packet                    CSL packet from user mode driver
  */
 struct cam_tfe_hw_mgr_ctx {
 	struct list_head                list;
@@ -105,6 +110,7 @@ struct cam_tfe_hw_mgr_ctx {
 	uint32_t                        cdm_handle;
 	struct cam_cdm_utils_ops       *cdm_ops;
 	struct cam_cdm_bl_request      *cdm_cmd;
+	struct cam_cdm_bl_info          last_submit_bl_cmd;
 	struct completion               config_done_complete;
 
 	uint32_t                        sof_cnt[CAM_TFE_HW_NUM_MAX];
@@ -125,6 +131,7 @@ struct cam_tfe_hw_mgr_ctx {
 	uint32_t                        master_hw_idx;
 	uint32_t                        slave_hw_idx;
 	uint32_t                        dual_tfe_irq_mismatch_cnt;
+	struct cam_packet              *packet;
 };
 
 /**
@@ -148,6 +155,7 @@ struct cam_tfe_hw_mgr_ctx {
 * @workq                  work queue for TFE hw manager
  * @debug_cfg              debug configuration
  * @support_consumed_addr  indicate whether hw supports last consumed address
+ * @ctx_lock               Spinlock for HW manager
  */
 struct cam_tfe_hw_mgr {
 	struct cam_isp_hw_mgr          mgr_common;
@@ -159,7 +167,7 @@ struct cam_tfe_hw_mgr {
 	atomic_t                       active_ctx_cnt;
 	struct list_head               free_ctx_list;
 	struct list_head               used_ctx_list;
-	struct cam_tfe_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+	struct cam_tfe_hw_mgr_ctx      ctx_pool[CAM_TFE_CTX_MAX];
 
 	struct cam_tfe_csid_hw_caps    tfe_csid_dev_caps[
 						CAM_TFE_CSID_HW_NUM_MAX];
@@ -167,6 +175,7 @@ struct cam_tfe_hw_mgr {
 	struct cam_req_mgr_core_workq *workq;
 	struct cam_tfe_hw_mgr_debug    debug_cfg;
 	bool                           support_consumed_addr;
+	spinlock_t                     ctx_lock;
 };
 
 /**
@@ -181,7 +190,7 @@ struct cam_tfe_hw_mgr {
 struct cam_tfe_hw_event_recovery_data {
 	uint32_t                   error_type;
 	uint32_t                   affected_core[CAM_TFE_HW_NUM_MAX];
-	struct cam_tfe_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	struct cam_tfe_hw_mgr_ctx *affected_ctx[CAM_TFE_CTX_MAX];
 	uint32_t                   no_of_context;
 };
 

+ 8 - 0
drivers/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h

@@ -20,6 +20,12 @@
 #define CAM_TFE_HW_NUM_MAX   3
 #define CAM_TFE_RDI_NUM_MAX  3
 
+/* Maximum number of TFE contexts */
+#define CAM_TFE_CTX_MAX      4
+
+/* Maximum number of IFE contexts */
+#define CAM_IFE_CTX_MAX      8
+
 /* Applicable vote paths for dual IFE, based on no. of UAPI definitions */
 #define CAM_ISP_MAX_PER_PATH_VOTES 40
 
@@ -122,6 +128,7 @@ struct cam_isp_bw_config_internal {
  *                          is valid or not
  * @reg_dump_buf_desc:     cmd buffer descriptors for reg dump
  * @num_reg_dump_buf:      Count of descriptors in reg_dump_buf_desc
+ * @packet:                CSL packet from user mode driver
  *
  */
 struct cam_isp_prepare_hw_update_data {
@@ -137,6 +144,7 @@ struct cam_isp_prepare_hw_update_data {
 	struct cam_cmd_buf_desc               reg_dump_buf_desc[
 						CAM_REG_DUMP_MAX_BUF_ENTRIES];
 	uint32_t                              num_reg_dump_buf;
+	struct cam_packet                     *packet;
 };
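CAM_TFE_CTX_MAX and CAM_IFE_CTX_MAX let each hw manager size its pools for what the hardware can actually run instead of the generic CAM_CTX_MAX. If both limits coexist, a compile-time guard documents the invariant that a per-HW pool never outgrows the generic one; a sketch under that assumption (the CAM_CTX_MAX value is reproduced here only for illustration, its real definition lives elsewhere in the tree):

    /* Constants from this patch; CAM_CTX_MAX value assumed for the check. */
    #define CAM_TFE_CTX_MAX      4
    #define CAM_IFE_CTX_MAX      8
    #define CAM_CTX_MAX          8

    /* Guard: code still indexed by CAM_CTX_MAX must never overrun the
     * smaller per-HW context pools introduced by this change. */
    _Static_assert(CAM_TFE_CTX_MAX <= CAM_CTX_MAX,
            "TFE context pool exceeds generic context limit");
    _Static_assert(CAM_IFE_CTX_MAX <= CAM_CTX_MAX,
            "IFE context pool exceeds generic context limit");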
 
 

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c

@@ -4638,6 +4638,7 @@ static int cam_csid_handle_hw_err_irq(
 			"CSID[%d] Can not get cmd for tasklet, evt_type %d",
 			csid_hw->hw_intf->hw_idx,
 			evt_type);
+		cam_csid_put_evt_payload(csid_hw, &evt_payload);
 		return rc;
 	}
 

+ 215 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c

@@ -18,6 +18,7 @@
 #include "cam_cpas_api.h"
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_subdev.h"
+#include "cam_tasklet_util.h"
 
 /* Timeout value in msec */
 #define TFE_CSID_TIMEOUT                               1000
@@ -869,6 +870,7 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw  *csid_hw)
 	const struct cam_tfe_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
 	uint32_t i, val, clk_lvl;
+	unsigned long flags;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -941,6 +943,12 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw  *csid_hw)
 	if (rc)
 		goto disable_soc;
 
+	spin_lock_irqsave(&csid_hw->spin_lock, flags);
+	csid_hw->fatal_err_detected = false;
+	csid_hw->device_enabled = 1;
+	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
+	cam_tasklet_start(csid_hw->tasklet);
+
 	return rc;
 
 disable_soc:
@@ -994,6 +1002,7 @@ static int cam_tfe_csid_disable_hw(struct cam_tfe_csid_hw *csid_hw)
 		CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
 			csid_hw->hw_intf->hw_idx);
 
+	cam_tasklet_stop(csid_hw->tasklet);
 	spin_lock_irqsave(&csid_hw->spin_lock, flags);
 	csid_hw->device_enabled = 0;
 	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
@@ -1933,6 +1942,9 @@ static int cam_tfe_csid_release(void *hw_priv,
 		goto end;
 	}
 
+	csid_hw->event_cb = NULL;
+	csid_hw->event_cb_priv = NULL;
+
 	if ((res->res_state <= CAM_ISP_RESOURCE_STATE_AVAILABLE) ||
 		(res->res_state >= CAM_ISP_RESOURCE_STATE_STREAMING)) {
 		CAM_WARN(CAM_ISP,
@@ -2677,6 +2689,189 @@ static int cam_tfe_csid_process_cmd(void *hw_priv,
 	return rc;
 }
 
+static int cam_csid_get_evt_payload(
+	struct cam_tfe_csid_hw *csid_hw,
+	struct cam_csid_evt_payload **evt_payload)
+{
+	spin_lock(&csid_hw->spin_lock);
+
+	if (list_empty(&csid_hw->free_payload_list)) {
+		*evt_payload = NULL;
+		spin_unlock(&csid_hw->spin_lock);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload core %d",
+			csid_hw->hw_intf->hw_idx);
+		return -ENOMEM;
+	}
+
+	*evt_payload = list_first_entry(&csid_hw->free_payload_list,
+			struct cam_csid_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	spin_unlock(&csid_hw->spin_lock);
+
+	return 0;
+}
+
+static int cam_csid_put_evt_payload(
+	struct cam_tfe_csid_hw *csid_hw,
+	struct cam_csid_evt_payload **evt_payload)
+{
+	unsigned long flags;
+
+	if (*evt_payload == NULL) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid payload core %d",
+			csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&csid_hw->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&csid_hw->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
+
+	return 0;
+}
+
+static char *cam_csid_status_to_str(uint32_t status)
+{
+	switch (status) {
+	case TFE_CSID_IRQ_REG_TOP:
+		return "TOP";
+	case TFE_CSID_IRQ_REG_RX:
+		return "RX";
+	case TFE_CSID_IRQ_REG_IPP:
+		return "IPP";
+	case TFE_CSID_IRQ_REG_RDI0:
+		return "RDI0";
+	case TFE_CSID_IRQ_REG_RDI1:
+		return "RDI1";
+	case TFE_CSID_IRQ_REG_RDI2:
+		return "RDI2";
+	default:
+		return "Invalid IRQ";
+	}
+}
+
+static int cam_csid_evt_bottom_half_handler(
+	void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_tfe_csid_hw *csid_hw;
+	struct cam_csid_evt_payload *evt_payload;
+	int i;
+	int rc = 0;
+	struct cam_isp_hw_event_info event_info;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP,
+			"Invalid Param handler_priv %pK evt_payload_priv %pK",
+			handler_priv, evt_payload_priv);
+		return 0;
+	}
+
+	csid_hw = (struct cam_tfe_csid_hw *)handler_priv;
+	evt_payload = (struct cam_csid_evt_payload *)evt_payload_priv;
+
+	if (!csid_hw->event_cb || !csid_hw->event_cb_priv) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"hw_idx %d Invalid args %pK %pK",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->event_cb,
+			csid_hw->event_cb_priv);
+		goto end;
+	}
+
+	if (csid_hw->event_cb_priv != evt_payload->priv) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"hw_idx %d priv mismatch %pK, %pK",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->event_cb_priv,
+			evt_payload->priv);
+		goto end;
+	}
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "idx %d err %d phy %d",
+		csid_hw->hw_intf->hw_idx,
+		evt_payload->evt_type,
+		csid_hw->csi2_rx_cfg.phy_sel);
+
+	for (i = 0; i < TFE_CSID_IRQ_REG_MAX; i++)
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "status %s: %x",
+			cam_csid_status_to_str(i),
+			evt_payload->irq_status[i]);
+
+	/*
+	 * This can be extended to handle more cases that
+	 * we want to offload from the IRQ handlers to the
+	 * bottom half.
+	 */
+	event_info.err_type = evt_payload->evt_type;
+	event_info.hw_idx = evt_payload->hw_idx;
+
+	switch (evt_payload->evt_type) {
+	case CAM_ISP_HW_ERROR_CSID_FATAL:
+		if (csid_hw->fatal_err_detected)
+			break;
+		csid_hw->fatal_err_detected = true;
+		rc = csid_hw->event_cb(NULL,
+			CAM_ISP_HW_EVENT_ERROR, (void *)&event_info);
+		break;
+
+	default:
+		CAM_DBG(CAM_ISP, "CSID[%d] invalid error type %d",
+			csid_hw->hw_intf->hw_idx,
+			evt_payload->evt_type);
+		break;
+	}
+end:
+	cam_csid_put_evt_payload(csid_hw, &evt_payload);
+	return 0;
+}
+
+static int cam_csid_handle_hw_err_irq(
+	struct cam_tfe_csid_hw *csid_hw,
+	int                     evt_type,
+	uint32_t               *irq_status)
+{
+	int      rc = 0;
+	int      i;
+	void    *bh_cmd = NULL;
+	struct cam_csid_evt_payload *evt_payload;
+
+	CAM_DBG(CAM_ISP, "CSID[%d] error %d",
+		csid_hw->hw_intf->hw_idx, evt_type);
+
+	rc = cam_csid_get_evt_payload(csid_hw, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No free payload core %d",
+			csid_hw->hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = tasklet_bh_api.get_bh_payload_func(csid_hw->tasklet, &bh_cmd);
+	if (rc || !bh_cmd) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID[%d] Can not get cmd for tasklet, evt_type %d",
+			csid_hw->hw_intf->hw_idx,
+			evt_type);
+		cam_csid_put_evt_payload(csid_hw, &evt_payload);
+		return rc;
+	}
+
+	evt_payload->evt_type = evt_type;
+	evt_payload->priv = csid_hw->event_cb_priv;
+	evt_payload->hw_idx = csid_hw->hw_intf->hw_idx;
+
+	for (i = 0; i < TFE_CSID_IRQ_REG_MAX; i++)
+		evt_payload->irq_status[i] = irq_status[i];
+
+	tasklet_bh_api.bottom_half_enqueue_func(csid_hw->tasklet,
+		bh_cmd,
+		csid_hw,
+		evt_payload,
+		cam_csid_evt_bottom_half_handler);
+
+	return rc;
+}
+
 irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
 {
 	struct cam_tfe_csid_hw                         *csid_hw;
@@ -2756,20 +2951,24 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
 		if (irq_status[TFE_CSID_IRQ_REG_RX] &
 			TFE_CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 
 		if (irq_status[TFE_CSID_IRQ_REG_RX] &
 			TFE_CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 
 		if (irq_status[TFE_CSID_IRQ_REG_RX] &
 			TFE_CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status[TFE_CSID_IRQ_REG_RX] &
 			TFE_CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 
 		if (irq_status[TFE_CSID_IRQ_REG_RX] &
@@ -2800,6 +2999,7 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
 			TFE_CSID_CSI2_RX_ERROR_MMAPPED_VC_DT)
 			is_error_irq = true;
 	}
+handle_fatal_error:
 	spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
 
 	if (csid_hw->error_irq_count || fatal_err_detected)
@@ -2825,6 +3025,8 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
 				CAM_SUBDEV_MESSAGE_IRQ_ERR,
 				(csid_hw->csi2_rx_cfg.phy_sel - 1));
 		}
+		cam_csid_handle_hw_err_irq(csid_hw,
+			CAM_ISP_HW_ERROR_CSID_FATAL, irq_status);
 	}
 
 	if (csid_hw->csid_debug & TFE_CSID_DEBUG_ENABLE_EOT_IRQ) {
@@ -3190,6 +3392,19 @@ int cam_tfe_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
 		tfe_csid_hw->rdi_res[i].res_priv = path_data;
 	}
 
+	rc = cam_tasklet_init(&tfe_csid_hw->tasklet, tfe_csid_hw, csid_idx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Unable to create CSID tasklet rc %d", rc);
+		goto err;
+	}
+
+	INIT_LIST_HEAD(&tfe_csid_hw->free_payload_list);
+	for (i = 0; i < CAM_CSID_EVT_PAYLOAD_MAX; i++) {
+		INIT_LIST_HEAD(&tfe_csid_hw->evt_payload[i].list);
+		list_add_tail(&tfe_csid_hw->evt_payload[i].list,
+			&tfe_csid_hw->free_payload_list);
+	}
+
 	tfe_csid_hw->csid_debug = 0;
 	tfe_csid_hw->error_irq_count = 0;
 	tfe_csid_hw->prev_boot_timestamp = 0;
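The probe-time loop above pre-populates free_payload_list so the IRQ path never allocates: cam_csid_get_evt_payload pops a node under the lock, the tasklet's bottom half returns it with cam_csid_put_evt_payload, and on an enqueue failure the payload must go straight back (the one-line IFE CSID change earlier in this diff plugs exactly that leak). A compilable userspace sketch of the pool, with a mutex in place of the hw spinlock and all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define EVT_PAYLOAD_MAX 10
    #define IRQ_REG_MAX 6

    struct evt_payload {
            struct evt_payload *next;            /* free-list link */
            int evt_type;
            unsigned int irq_status[IRQ_REG_MAX];
    };

    static struct evt_payload pool[EVT_PAYLOAD_MAX];
    static struct evt_payload *free_list;
    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static void pool_init(void)
    {
            for (int i = 0; i < EVT_PAYLOAD_MAX; i++) {
                    pool[i].next = free_list;
                    free_list = &pool[i];
            }
    }

    static struct evt_payload *get_evt_payload(void)
    {
            pthread_mutex_lock(&pool_lock);
            struct evt_payload *p = free_list;
            if (p)
                    free_list = p->next;
            pthread_mutex_unlock(&pool_lock);
            return p;                            /* NULL == pool exhausted */
    }

    static void put_evt_payload(struct evt_payload *p)
    {
            pthread_mutex_lock(&pool_lock);
            p->next = free_list;
            free_list = p;
            pthread_mutex_unlock(&pool_lock);
    }

    int main(void)
    {
            pool_init();
            struct evt_payload *p = get_evt_payload();
            if (!p)
                    return 1;
            p->evt_type = 1;
            memset(p->irq_status, 0, sizeof(p->irq_status));
            /* ... hand off to the bottom half; on failure, return it now ... */
            put_evt_payload(p);
            printf("payload recycled\n");
            return 0;
    }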

+ 26 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.h

@@ -74,6 +74,8 @@
 #define TFE_CSID_DEBUG_DISABLE_EARLY_EOF              BIT(8)
 #define TFE_CSID_DEBUG_ENABLE_RST_IRQ_LOG             BIT(9)
 
+#define CAM_CSID_EVT_PAYLOAD_MAX                  10
+
 /* enum cam_csid_path_halt_mode select the path halt mode control */
 enum cam_tfe_csid_path_halt_mode {
 	TFE_CSID_HALT_MODE_INTERNAL,
@@ -354,12 +356,31 @@ struct cam_tfe_csid_path_cfg {
 	uint32_t                        sensor_vbi;
 };
 
+/**
+ * struct cam_csid_evt_payload - payload for CSID hw event
+ * @list       : list head
+ * @evt_type   : Event type from CSID
+ * @irq_status : IRQ Status register
+ * @hw_idx     : Hw index
+ * @priv       : Private data of payload
+ */
+struct cam_csid_evt_payload {
+	struct list_head   list;
+	uint32_t           evt_type;
+	uint32_t           irq_status[TFE_CSID_IRQ_REG_MAX];
+	uint32_t           hw_idx;
+	void              *priv;
+};
+
 /**
  * struct cam_tfe_csid_hw- csid hw device resources data
  *
  * @hw_intf:                  contain the csid hw interface information
  * @hw_info:                  csid hw device information
  * @csid_info:                csid hw specific information
+ * @tasklet:                  tasklet to handle csid errors
+ * @free_payload_list:        list head for payload
+ * @evt_payload:              Event payload to be passed to tasklet
  * @in_res_id:                csid in resource type
  * @csi2_rx_cfg:              csi2 rx decoder configuration for csid
  * @csi2_rx_reserve_cnt:      csi2 reservations count value
@@ -382,6 +403,7 @@ struct cam_tfe_csid_path_cfg {
  * @device_enabled            Device enabled will set once CSID powered on and
  *                            initial configuration are done.
  * @lock_state                csid spin lock
+ * @fatal_err_detected        flag to indicate a fatal error was reported
  * @event_cb:                 Callback function to hw mgr in case of hw events
  * @event_cb_priv:            Context data
  * @ppi_hw_intf               interface to ppi hardware
@@ -395,6 +417,9 @@ struct cam_tfe_csid_hw {
 	struct cam_hw_intf                 *hw_intf;
 	struct cam_hw_info                 *hw_info;
 	struct cam_tfe_csid_hw_info        *csid_info;
+	void                               *tasklet;
+	struct list_head                    free_payload_list;
+	struct cam_csid_evt_payload   evt_payload[CAM_CSID_EVT_PAYLOAD_MAX];
 	uint32_t                            in_res_id;
 	struct cam_tfe_csid_csi2_rx_cfg     csi2_rx_cfg;
 	uint32_t                            csi2_reserve_cnt;
@@ -413,6 +438,7 @@ struct cam_tfe_csid_hw {
 	uint32_t                            error_irq_count;
 	uint32_t                            device_enabled;
 	spinlock_t                          spin_lock;
+	bool                                fatal_err_detected;
 	cam_hw_mgr_event_cb_func            event_cb;
 	void                               *event_cb_priv;
 	struct cam_hw_intf                 *ppi_hw_intf[CAM_CSID_PPI_HW_MAX];

+ 4 - 4
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c

@@ -1647,11 +1647,11 @@ static void cam_ope_ctx_cdm_callback(uint32_t handle, void *userdata,
 		if (!rc)
 			goto end;
 	} else {
-		CAM_ERR(CAM_OPE,
+		CAM_INFO(CAM_OPE,
 			"CDM hdl=%x, udata=%pK, status=%d, cookie=%d req_id = %llu ctx_id=%d",
 			 handle, userdata, status, cookie,
 			 ope_req->request_id, ctx->ctx_id);
-		CAM_ERR(CAM_OPE, "Rst of CDM and OPE for error reqid = %lld",
+		CAM_INFO(CAM_OPE, "Rst of CDM and OPE for error reqid = %lld",
 			ope_req->request_id);
 		if (status != CAM_CDM_CB_STATUS_HW_FLUSH) {
 			cam_ope_dump_req_data(ope_req);
@@ -2657,8 +2657,6 @@ static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
 
 		hw_mgr->clk_info.base_clk =
 			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
-		hw_mgr->clk_info.curr_clk =
-			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
 		hw_mgr->clk_info.threshold = 5;
 		hw_mgr->clk_info.over_clked = 0;
 
@@ -2689,6 +2687,8 @@ static int cam_ope_mgr_acquire_hw(void *hw_priv, void *hw_acquire_args)
 		soc_info = &dev->soc_info;
 		idx = soc_info->src_clk_idx;
 		clk_update.clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][idx];
+		hw_mgr->clk_info.curr_clk =
+			soc_info->clk_rate[CAM_TURBO_VOTE][idx];
 
 		rc = hw_mgr->ope_dev_intf[i]->hw_ops.process_cmd(
 			hw_mgr->ope_dev_intf[i]->hw_priv, OPE_HW_CLK_UPDATE,

+ 19 - 3
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -15,6 +15,7 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 #include "cam_req_mgr_dev.h"
+#include "cam_req_mgr_debug.h"
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 static struct cam_req_mgr_core_link g_links[MAXIMUM_LINKS_PER_SESSION];
@@ -1716,6 +1717,15 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 					"Max retry attempts (count %d) reached on link[0x%x] for req [%lld]",
 					max_retry, link->link_hdl,
 					in_q->slot[in_q->rd_idx].req_id);
+
+				cam_req_mgr_debug_delay_detect();
+				trace_cam_delay_detect("CRM",
+					"Max retry attempts reached",
+					in_q->slot[in_q->rd_idx].req_id,
+					CAM_DEFAULT_VALUE,
+					link->link_hdl,
+					CAM_DEFAULT_VALUE, rc);
+
 				__cam_req_mgr_notify_error_on_link(link, dev);
 				link->retry_cnt = 0;
 			}
@@ -2350,6 +2360,7 @@ int cam_req_mgr_process_flush_req(void *priv, void *data)
 		rc = -EINVAL;
 		goto end;
 	}
+
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
@@ -2667,6 +2678,7 @@ int cam_req_mgr_process_error(void *priv, void *data)
 		rc = -EINVAL;
 		goto end;
 	}
+
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
@@ -2760,6 +2772,7 @@ int cam_req_mgr_process_stop(void *priv, void *data)
 		rc = -EINVAL;
 		goto end;
 	}
+
 	link = (struct cam_req_mgr_core_link *)priv;
 	__cam_req_mgr_flush_req_slot(link);
 end:
@@ -2790,6 +2803,7 @@ static int cam_req_mgr_process_trigger(void *priv, void *data)
 		rc = -EINVAL;
 		goto end;
 	}
+
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u;
@@ -3260,7 +3274,7 @@ static int cam_req_mgr_cb_notify_trigger(
 
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CAM_ERR(CAM_CRM, "no empty task frame %lld",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task frame %lld",
 			trigger_data->frame_id);
 		rc = -EBUSY;
 		goto end;
@@ -3520,7 +3534,8 @@ int cam_req_mgr_create_session(
 	ses_info->session_hdl = session_hdl;
 
 	mutex_init(&cam_session->lock);
-	CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK hdl 0x%x",
+		&cam_session->lock, session_hdl);
 
 	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
@@ -3685,7 +3700,7 @@ int cam_req_mgr_link(struct cam_req_mgr_ver_info *link_info)
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->u.link_info_v1.session_hdl;
 	root_dev.priv = (void *)link;
-
+	root_dev.dev_id = CAM_CRM;
 	mutex_lock(&link->lock);
 	/* Create unique dev handle for link */
 	link->link_hdl = cam_create_device_hdl(&root_dev);
@@ -3795,6 +3810,7 @@ int cam_req_mgr_link_v2(struct cam_req_mgr_ver_info *link_info)
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->u.link_info_v2.session_hdl;
 	root_dev.priv = (void *)link;
+	root_dev.dev_id = CAM_CRM;
 
 	mutex_lock(&link->lock);
 	/* Create unique dev handle for link */

+ 10 - 1
drivers/cam_req_mgr/cam_req_mgr_debug.c

@@ -8,6 +8,7 @@
 #define MAX_SESS_INFO_LINE_BUFF_LEN 256
 
 static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+static u32 cam_debug_mgr_delay_detect;
 
 static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
 {
@@ -131,6 +132,8 @@ int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
 		debugfs_root, core_dev, &bubble_recovery);
 	dbgfileptr = debugfs_create_bool("recovery_on_apply_fail", 0644,
 		debugfs_root, &core_dev->recovery_on_apply_fail);
+	dbgfileptr = debugfs_create_u32("delay_detect_count", 0644,
+		debugfs_root, &cam_debug_mgr_delay_detect);
 	if (IS_ERR(dbgfileptr)) {
 		if (PTR_ERR(dbgfileptr) == -ENODEV)
 			CAM_WARN(CAM_MEM, "DebugFS not enabled in kernel!");
@@ -141,7 +144,13 @@ end:
 	return rc;
 }
 
-void cam_req_mgr_debug_unregister(void)
+int cam_req_mgr_debug_unregister(void)
 {
 	debugfs_remove_recursive(debugfs_root);
+	return 0;
+}
+
+void cam_req_mgr_debug_delay_detect(void)
+{
+	cam_debug_mgr_delay_detect += 1;
 }

+ 5 - 1
drivers/cam_req_mgr/cam_req_mgr_debug.h

@@ -11,6 +11,10 @@
 #include "cam_debug_util.h"
 
 int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
-void cam_req_mgr_debug_unregister(void);
+int cam_req_mgr_debug_unregister(void);
 
+/* cam_req_mgr_debug_delay_detect()
+ * @brief    : increments the debugfs delay counter by 1 whenever a delay is detected.
+ */
+void cam_req_mgr_debug_delay_detect(void);
 #endif

+ 2 - 1
drivers/cam_req_mgr/cam_req_mgr_dev.c

@@ -162,7 +162,8 @@ static int cam_req_mgr_close(struct file *filep)
 	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
 
 	CAM_WARN(CAM_CRM,
-		"release invoked associated userspace process has died");
+		"release invoked associated userspace process has died, open_cnt: %d",
+		g_dev.open_cnt);
 	mutex_lock(&g_dev.cam_lock);
 
 	if (g_dev.open_cnt <= 0) {

+ 25 - 4
drivers/cam_req_mgr/cam_req_mgr_util.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "CAM-REQ-MGR_UTIL %s:%d " fmt, __func__, __LINE__
@@ -113,14 +113,30 @@ static int32_t cam_get_free_handle_index(void)
 
 	idx = find_first_zero_bit(hdl_tbl->bitmap, hdl_tbl->bits);
 
-	if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2 || idx < 0)
+	if (idx >= CAM_REQ_MGR_MAX_HANDLES_V2 || idx < 0) {
+		CAM_DBG(CAM_CRM, "idx: %d", idx);
 		return -ENOSR;
+	}
 
 	set_bit(idx, hdl_tbl->bitmap);
 
 	return idx;
 }
 
+void cam_dump_tbl_info(void)
+{
+	int i;
+
+	for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES_V2; i++)
+		CAM_INFO(CAM_CRM,
+			"i: %d session_hdl=0x%x hdl_value=0x%x type=%d state=%d dev_id=0x%llx",
+			i, hdl_tbl->hdl[i].session_hdl,
+			hdl_tbl->hdl[i].hdl_value,
+			hdl_tbl->hdl[i].type,
+			hdl_tbl->hdl[i].state,
+			hdl_tbl->hdl[i].dev_id);
+}
+
 int32_t cam_create_session_hdl(void *priv)
 {
 	int idx;
@@ -137,6 +153,7 @@ int32_t cam_create_session_hdl(void *priv)
 	idx = cam_get_free_handle_index();
 	if (idx < 0) {
 		CAM_ERR(CAM_CRM, "Unable to create session handle");
+		cam_dump_tbl_info();
 		spin_unlock_bh(&hdl_tbl_lock);
 		return idx;
 	}
@@ -149,6 +166,7 @@ int32_t cam_create_session_hdl(void *priv)
 	hdl_tbl->hdl[idx].state = HDL_ACTIVE;
 	hdl_tbl->hdl[idx].priv = priv;
 	hdl_tbl->hdl[idx].ops = NULL;
+	hdl_tbl->hdl[idx].dev_id = CAM_CRM;
 	spin_unlock_bh(&hdl_tbl_lock);
 
 	return handle;
@@ -169,7 +187,9 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
 
 	idx = cam_get_free_handle_index();
 	if (idx < 0) {
-		CAM_ERR(CAM_CRM, "Unable to create device handle");
+		CAM_ERR(CAM_CRM,
+			"Unable to create device handle(idx= %d)", idx);
+		cam_dump_tbl_info();
 		spin_unlock_bh(&hdl_tbl_lock);
 		return idx;
 	}
@@ -182,9 +202,10 @@ int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
 	hdl_tbl->hdl[idx].state = HDL_ACTIVE;
 	hdl_tbl->hdl[idx].priv = hdl_data->priv;
 	hdl_tbl->hdl[idx].ops = hdl_data->ops;
+	hdl_tbl->hdl[idx].dev_id = hdl_data->dev_id;
 	spin_unlock_bh(&hdl_tbl_lock);
 
-	pr_debug("%s: handle = %x", __func__, handle);
+	pr_debug("%s: handle = 0x%x idx = %d\n", __func__, handle, idx);
 	return handle;
 }
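Recording dev_id at handle creation pays off when cam_get_free_handle_index returns -ENOSR: cam_dump_tbl_info prints the owner of every slot, so a leaked handle points straight at the subdevice that forgot to release it. A compilable sketch of a fixed-size handle table with the same dump-on-exhaustion behavior (linear scan instead of the driver's bitmap, all names and sizes illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HANDLES 16

    struct handle {
            uint32_t hdl_value;
            uint64_t dev_id;     /* owning subdevice, recorded at create time */
            int      in_use;
    };

    static struct handle tbl[MAX_HANDLES];

    static int get_free_index(void)
    {
            for (int i = 0; i < MAX_HANDLES; i++)
                    if (!tbl[i].in_use)
                            return i;
            return -1;                        /* table exhausted */
    }

    static int create_handle(uint64_t dev_id)
    {
            int idx = get_free_index();

            if (idx < 0) {
                    /* exhaustion: dump owners so the leaker is obvious */
                    for (int i = 0; i < MAX_HANDLES; i++)
                            fprintf(stderr, "i:%d hdl=0x%x dev_id=0x%llx\n",
                                    i, tbl[i].hdl_value,
                                    (unsigned long long)tbl[i].dev_id);
                    return -1;
            }
            tbl[idx].in_use = 1;
            tbl[idx].dev_id = dev_id;
            tbl[idx].hdl_value = 0x10000u | (uint32_t)idx;
            return (int)tbl[idx].hdl_value;
    }

    int main(void)
    {
            for (int i = 0; i <= MAX_HANDLES; i++)   /* one too many on purpose */
                    if (create_handle((uint64_t)(0x100 + i)) < 0)
                            puts("handle table full, owners dumped");
            return 0;
    }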
 

+ 5 - 1
drivers/cam_req_mgr/cam_req_mgr_util.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_REQ_MGR_UTIL_API_H_
@@ -35,6 +35,7 @@ enum hdl_type {
  * @hdl_value: Allocated handle
  * @type: session/device handle
  * @state: free/used
+ * @dev_id: device id for handle
  * @ops: ops structure
  * @priv: private data of a handle
  */
@@ -43,6 +44,7 @@ struct handle {
 	uint32_t hdl_value;
 	enum hdl_type type;
 	enum hdl_state state;
+	uint64_t dev_id;
 	void *ops;
 	void *priv;
 };
@@ -65,6 +67,7 @@ struct cam_req_mgr_util_hdl_tbl {
  * @v4l2_sub_dev_flag: flag to create v4l2 sub device
  * @media_entity_flag: flag for media entity
  * @reserved: reserved field
+ * @dev_id: device id for handle
  * @ops: ops pointer for a device handle
  * @priv: private data for a device handle
  */
@@ -73,6 +76,7 @@ struct cam_create_dev_hdl {
 	int32_t v4l2_sub_dev_flag;
 	int32_t media_entity_flag;
 	int32_t reserved;
+	uint64_t dev_id;
 	void *ops;
 	void *priv;
 };

+ 24 - 0
drivers/cam_req_mgr/cam_req_mgr_workq.c

@@ -102,6 +102,7 @@ void cam_req_mgr_process_workq(struct work_struct *w)
 	workq = (struct cam_req_mgr_core_workq *)
 		container_of(w, struct cam_req_mgr_core_workq, work);
 
+	cam_req_mgr_thread_switch_delay_detect(workq->workq_scheduled_ts);
 	while (i < CRM_TASK_PRIORITY_MAX) {
 		WORKQ_ACQUIRE_LOCK(workq, flags);
 		while (!list_empty(&workq->task.process_head[i])) {
@@ -164,6 +165,7 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 	CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
 
+	workq->workq_scheduled_ts = ktime_get();
 	queue_work(workq->job, &workq->work);
 	WORKQ_RELEASE_LOCK(workq, flags);
 end:
@@ -265,3 +267,25 @@ void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 		*crm_workq = NULL;
 	}
 }
+
+void cam_req_mgr_thread_switch_delay_detect(ktime_t workq_scheduled)
+{
+	uint64_t                         diff;
+	ktime_t                          cur_time;
+	struct timespec64                cur_ts;
+	struct timespec64                workq_scheduled_ts;
+
+	cur_time = ktime_get();
+	diff = ktime_ms_delta(cur_time, workq_scheduled);
+	workq_scheduled_ts = ktime_to_timespec64(workq_scheduled);
+	cur_ts = ktime_to_timespec64(cur_time);
+
+	if (diff > CAM_WORKQ_RESPONSE_TIME_THRESHOLD) {
+		CAM_WARN(CAM_CRM,
+			"Workq delay detected, scheduled %lld:%06ld cur %lld:%06ld diff %llu ms",
+			workq_scheduled_ts.tv_sec,
+			workq_scheduled_ts.tv_nsec / NSEC_PER_USEC,
+			cur_ts.tv_sec, cur_ts.tv_nsec / NSEC_PER_USEC,
+			diff);
+	}
+}
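The detector is just two timestamps: cam_req_mgr_workq_enqueue_task stamps workq_scheduled_ts with ktime_get() before queue_work(), the worker measures the gap with ktime_ms_delta() on entry, and anything over CAM_WORKQ_RESPONSE_TIME_THRESHOLD (5 ms) is logged. The same measurement in portable C with CLOCK_MONOTONIC (threshold as in this patch, everything else illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define RESPONSE_TIME_THRESHOLD_MS 5

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    /* Called at the top of the worker with the timestamp recorded at
     * enqueue time; warns if the thread switch took too long. */
    static void thread_switch_delay_detect(uint64_t scheduled_ns)
    {
            uint64_t diff_ms = (now_ns() - scheduled_ns) / 1000000ull;

            if (diff_ms > RESPONSE_TIME_THRESHOLD_MS)
                    fprintf(stderr, "workq delay detected: %llu ms\n",
                            (unsigned long long)diff_ms);
    }

    int main(void)
    {
            uint64_t scheduled = now_ns();           /* stamp at enqueue */
            struct timespec nap = { 0, 10 * 1000000L };

            nanosleep(&nap, NULL);                   /* simulate a slow switch */
            thread_switch_delay_detect(scheduled);   /* measure at dequeue */
            return 0;
    }

The same enqueue/dequeue pair of stamps is reused below for CCI async writes and sync callbacks, so one threshold covers every offloaded path.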

+ 24 - 8
drivers/cam_req_mgr/cam_req_mgr_workq.h

@@ -26,6 +26,13 @@
  */
 #define CAM_WORKQ_FLAG_SERIAL                    (1 << 1)
 
+/*
+ * Response time threshold in ms beyond which it is considered
+ * as workq scheduling/processing delay.
+ */
+#define CAM_WORKQ_RESPONSE_TIME_THRESHOLD   5
+
 /* Task priorities, lower the number higher the priority*/
 enum crm_task_priority {
 	CRM_TASK_PRIORITY_0,
@@ -54,14 +61,14 @@ enum crm_workq_context {
  * @ret        : return value in future to use for blocking calls
  */
 struct crm_workq_task {
-	int32_t                  priority;
-	void                    *payload;
-	int32_t                (*process_cb)(void *priv, void *data);
-	void                    *parent;
-	struct list_head         entry;
-	uint8_t                  cancel;
-	void                    *priv;
-	int32_t                  ret;
+	int32_t                    priority;
+	void                      *payload;
+	int32_t                  (*process_cb)(void *priv, void *data);
+	void                      *parent;
+	struct list_head           entry;
+	uint8_t                    cancel;
+	void                      *priv;
+	int32_t                    ret;
 };
 
 /** struct cam_req_mgr_core_workq
@@ -84,6 +91,7 @@ struct cam_req_mgr_core_workq {
 	struct workqueue_struct   *job;
 	spinlock_t                 lock_bh;
 	uint32_t                   in_irq;
+	ktime_t                    workq_scheduled_ts;
 
 	/* tasks */
 	struct {
@@ -142,6 +150,14 @@ void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
 int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 	void *priv, int32_t prio);
 
+/**
+ * cam_req_mgr_thread_switch_delay_detect()
+ * @brief: Detects whether a workq scheduling delay has occurred
+ * @timestamp: workq scheduled timestamp
+ */
+void cam_req_mgr_thread_switch_delay_detect(
+	ktime_t timestamp);
+
 /**
  * cam_req_mgr_workq_get_task()
  * @brief: Returns empty task pointer for use

+ 1 - 0
drivers/cam_sensor_module/cam_actuator/cam_actuator_core.c

@@ -848,6 +848,7 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
 		bridge_params.v4l2_sub_dev_flag = 0;
 		bridge_params.media_entity_flag = 0;
 		bridge_params.priv = a_ctrl;
+		bridge_params.dev_id = CAM_ACTUATOR;
 
 		actuator_acq_dev.device_handle =
 			cam_create_device_hdl(&bridge_params);

+ 4 - 0
drivers/cam_sensor_module/cam_cci/cam_cci_core.c

@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include "cam_cci_core.h"
 #include "cam_cci_dev.h"
+#include "cam_req_mgr_workq.h"
 
 static int32_t cam_cci_convert_type_to_num_bytes(
 	enum camera_sensor_i2c_type type)
@@ -1533,6 +1534,8 @@ static void cam_cci_write_async_helper(struct work_struct *work)
 	enum cci_i2c_master_t master;
 	struct cam_cci_master_info *cci_master_info;
 
+	cam_req_mgr_thread_switch_delay_detect(
+		write_async->workq_scheduled_ts);
 	cci_dev = write_async->cci_dev;
 	i2c_msg = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
 	master = write_async->c_ctrl.cci_info->cci_i2c_master;
@@ -1601,6 +1604,7 @@ static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd,
 	cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
 	cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
 
+	write_async->workq_scheduled_ts = ktime_get();
 	queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
 
 	return rc;

+ 2 - 0
drivers/cam_sensor_module/cam_cci/cam_cci_dev.h

@@ -31,6 +31,7 @@
 #include "cam_cci_hwreg.h"
 #include "cam_soc_util.h"
 #include "cam_debug_util.h"
+#include "cam_req_mgr_workq.h"
 
 #define CCI_I2C_QUEUE_0_SIZE 128
 #define CCI_I2C_QUEUE_1_SIZE 32
@@ -292,6 +293,7 @@ struct cci_write_async {
 	struct cam_cci_ctrl c_ctrl;
 	enum cci_i2c_queue_t queue;
 	struct work_struct work;
+	ktime_t workq_scheduled_ts;
 	enum cci_i2c_sync sync_en;
 };
 

+ 1 - 0
drivers/cam_sensor_module/cam_csiphy/cam_csiphy_core.c

@@ -1110,6 +1110,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
 		bridge_params.v4l2_sub_dev_flag = 0;
 		bridge_params.media_entity_flag = 0;
 		bridge_params.priv = csiphy_dev;
+		bridge_params.dev_id = CAM_CSIPHY;
 		index = csiphy_dev->acquire_count;
 		csiphy_acq_dev.device_handle =
 			cam_create_device_hdl(&bridge_params);

+ 1 - 0
drivers/cam_sensor_module/cam_eeprom/cam_eeprom_core.c

@@ -351,6 +351,7 @@ static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
 	bridge_params.v4l2_sub_dev_flag = 0;
 	bridge_params.media_entity_flag = 0;
 	bridge_params.priv = e_ctrl;
+	bridge_params.dev_id = CAM_EEPROM;
 
 	eeprom_acq_dev.device_handle =
 		cam_create_device_hdl(&bridge_params);

+ 1 - 0
drivers/cam_sensor_module/cam_flash/cam_flash_dev.c

@@ -64,6 +64,7 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
 		bridge_params.v4l2_sub_dev_flag = 0;
 		bridge_params.media_entity_flag = 0;
 		bridge_params.priv = fctrl;
+		bridge_params.dev_id = CAM_FLASH;
 
 		flash_acq_dev.device_handle =
 			cam_create_device_hdl(&bridge_params);

+ 1 - 0
drivers/cam_sensor_module/cam_ois/cam_ois_core.c

@@ -82,6 +82,7 @@ static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
 	bridge_params.v4l2_sub_dev_flag = 0;
 	bridge_params.media_entity_flag = 0;
 	bridge_params.priv = o_ctrl;
+	bridge_params.dev_id = CAM_OIS;
 
 	ois_acq_dev.device_handle =
 		cam_create_device_hdl(&bridge_params);

+ 1 - 0
drivers/cam_sensor_module/cam_sensor/cam_sensor_core.c

@@ -835,6 +835,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
 		bridge_params.v4l2_sub_dev_flag = 0;
 		bridge_params.media_entity_flag = 0;
 		bridge_params.priv = s_ctrl;
+		bridge_params.dev_id = CAM_SENSOR;
 
 		sensor_acq_dev.device_handle =
 			cam_create_device_hdl(&bridge_params);

+ 2 - 0
drivers/cam_sync/cam_sync.c

@@ -17,6 +17,7 @@
 #include "cam_debug_util.h"
 #include "cam_common_util.h"
 #include "camera_main.h"
+#include "cam_req_mgr_workq.h"
 
 struct sync_device *sync_dev;
 
@@ -130,6 +131,7 @@ int cam_sync_register_callback(sync_callback cb_func,
 			sync_cb->status = row->state;
 			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
 				sync_cb->sync_obj);
+			sync_cb->workq_scheduled_ts = ktime_get();
 			queue_work(sync_dev->work_queue,
 				&sync_cb->cb_dispatch_work);
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

+ 8 - 6
drivers/cam_sync/cam_sync_private.h

@@ -94,18 +94,20 @@ struct sync_child_info {
  * struct sync_callback_info - Single node of information about a kernel
  * callback registered on a sync object
  *
- * @callback_func    : Callback function, registered by client driver
- * @cb_data          : Callback data, registered by client driver
- * @status........   : Status with which callback will be invoked in client
- * @sync_obj         : Sync id of the object for which callback is registered
- * @cb_dispatch_work : Work representing the call dispatch
- * @list             : List member used to append this node to a linked list
+ * @callback_func      : Callback function, registered by client driver
+ * @cb_data            : Callback data, registered by client driver
+ * @status             : Status with which callback will be invoked in client
+ * @sync_obj           : Sync id of the object for which callback is registered
+ * @workq_scheduled_ts : workqueue scheduled timestamp
+ * @cb_dispatch_work   : Work representing the call dispatch
+ * @list               : List member used to append this node to a linked list
  */
 struct sync_callback_info {
 	sync_callback callback_func;
 	void *cb_data;
 	int status;
 	int32_t sync_obj;
+	ktime_t workq_scheduled_ts;
 	struct work_struct cb_dispatch_work;
 	struct list_head list;
 };

+ 3 - 0
drivers/cam_sync/cam_sync_util.c

@@ -4,6 +4,7 @@
  */
 
 #include "cam_sync_util.h"
+#include "cam_req_mgr_workq.h"
 
 int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
 	long *idx)
@@ -293,6 +294,8 @@ void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
 		cb_dispatch_work);
 	sync_callback sync_data = cb_info->callback_func;
 
+	cam_req_mgr_thread_switch_delay_detect(
+			cb_info->workq_scheduled_ts);
 	sync_data(cb_info->sync_obj, cb_info->status, cb_info->cb_data);
 
 	kfree(cb_info);

+ 3 - 0
drivers/cam_utils/cam_packet_util.c

@@ -179,6 +179,9 @@ void cam_packet_dump_patch_info(struct cam_packet *packet,
 			((uint32_t *) &packet->payload +
 			packet->patch_offset/4);
 
+	CAM_INFO(CAM_UTIL, "Total num of patches: %d",
+		packet->num_patches);
+
 	for (i = 0; i < packet->num_patches; i++) {
 		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
 			sec_mmu_hdl : iommu_hdl;

+ 35 - 0
drivers/cam_utils/cam_trace.h

@@ -19,6 +19,8 @@
 #include "cam_req_mgr_interface.h"
 #include "cam_context.h"
 
+#define CAM_DEFAULT_VALUE 0xFF
+
 TRACE_EVENT(cam_context_state,
 	TP_PROTO(const char *name, struct cam_context *ctx),
 	TP_ARGS(name, ctx),
@@ -282,6 +284,39 @@ TRACE_EVENT(cam_req_mgr_add_req,
 	)
 );
 
+TRACE_EVENT(cam_delay_detect,
+	TP_PROTO(const char *entity,
+		const char *text, uint64_t req_id,
+		uint32_t ctx_id, int32_t link_hdl,
+		int32_t session_hdl, int rc),
+	TP_ARGS(entity, text, req_id, ctx_id,
+		link_hdl, session_hdl, rc),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__string(text, text)
+		__field(uint64_t, req_id)
+		__field(uint32_t, ctx_id)
+		__field(int32_t, link_hdl)
+		__field(int32_t, session_hdl)
+		__field(int32_t, rc)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__assign_str(text, text);
+		__entry->req_id      = req_id;
+		__entry->ctx_id      = ctx_id;
+		__entry->link_hdl    = link_hdl;
+		__entry->session_hdl = session_hdl;
+		__entry->rc          = rc;
+	),
+	TP_printk(
+		"%s: %s request=%lld ctx_id=%d link_hdl=0x%x session_hdl=0x%x rc=%d",
+			__get_str(entity), __get_str(text), __entry->req_id,
+			__entry->ctx_id, __entry->link_hdl,
+			__entry->session_hdl, __entry->rc
+	)
+);
+
 TRACE_EVENT(cam_submit_to_hw,
 	TP_PROTO(const char *entity, uint64_t req_id),
 	TP_ARGS(entity, req_id),

+ 1 - 0
include/uapi/camera/media/cam_req_mgr.h

@@ -426,6 +426,7 @@ struct cam_mem_cache_ops_cmd {
  * @CAM_REQ_MGR_ERROR_TYPE_BUFFER: Buffer was not filled, not fatal
  * @CAM_REQ_MGR_ERROR_TYPE_RECOVERY: Fatal error, can be recovered
  * @CAM_REQ_MGR_ERROR_TYPE_SOF_FREEZE: SOF freeze, can be recovered
+ * @CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY: Fatal error, requires full recovery
  * @CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT: page fault, can be recovered
  */
 #define CAM_REQ_MGR_ERROR_TYPE_DEVICE           0