
msm: camera: isp: Stop HW immediately in flush

When userspace issues a flush, the ISP driver needs to ensure that
the wait and active list requests are flushed and the corresponding
buffer fences are signaled with error. To flush the active and wait
lists, the IFE hardware is stopped immediately; the IFE must then
also be reset so that the VFE BUS FIFOs are cleared. The IFE HW is
started again once the next init packet is received.

CRs-Fixed: 2513939
Change-Id: I9a35ce05c24d6b63016e264a870d376eabb2b56f
Signed-off-by: Venkat Chinta <[email protected]>
Venkat Chinta, 6 years ago
Commit
aaab1ef87e
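
The sequence the commit describes, condensed into a small self-contained C model for readability. The stub names (stop_hw_immediately, flush_list, reset_hw) are illustrative stand-ins, not the driver's API; the real implementation is __cam_isp_ctx_flush_req_in_top_state() in the diff below.

/*
 * Illustration only (assumed stub names): models the order of operations
 * introduced by this change for a FLUSH_TYPE_ALL request.
 */
#include <stdio.h>

enum ctx_state { CTX_ACQUIRED, CTX_READY, CTX_FLUSHED, CTX_ACTIVATED };

static void stop_hw_immediately(void) { puts("stop IFE immediately (no frame-boundary wait)"); }
static void flush_list(const char *l) { printf("flush %s list, signal fences with error\n", l); }
static void reset_hw(void)            { puts("reset CSID/VFE so the BUS FIFOs are cleared"); }

static enum ctx_state flush_all(enum ctx_state state)
{
	if (state <= CTX_READY)
		return CTX_ACQUIRED;    /* nothing streaming yet; drop back to ACQUIRED */

	stop_hw_immediately();          /* 1. stop HW so no further buf-done IRQs arrive */
	flush_list("wait");             /* 2. flush wait ...                             */
	flush_list("active");           /*    ... and active requests                    */
	reset_hw();                     /* 3. clear leftover data in the write FIFOs     */
	return CTX_FLUSHED;             /* 4. HW is restarted on the next init packet    */
}

int main(void)
{
	printf("state after flush: %d (CTX_FLUSHED = %d)\n",
		flush_all(CTX_ACTIVATED), CTX_FLUSHED);
	return 0;
}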

+ 3 - 2
drivers/cam_core/cam_context.h

@@ -32,8 +32,9 @@ enum cam_context_state {
 	CAM_CTX_AVAILABLE            = 1,
 	CAM_CTX_ACQUIRED             = 2,
 	CAM_CTX_READY                = 3,
-	CAM_CTX_ACTIVATED            = 4,
-	CAM_CTX_STATE_MAX            = 5,
+	CAM_CTX_FLUSHED              = 4,
+	CAM_CTX_ACTIVATED            = 5,
+	CAM_CTX_STATE_MAX            = 6,
 };
 
 /**

+ 12 - 0
drivers/cam_core/cam_hw_mgr_intf.h

@@ -287,6 +287,16 @@ struct cam_hw_dump_pf_args {
 	bool                           *mem_found;
 };
 
+/**
+ * struct cam_hw_reset_args - HW reset arguments
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ *
+ */
+struct cam_hw_reset_args {
+	void                           *ctxt_to_hw_map;
+};
+
 /* enum cam_hw_mgr_command - Hardware manager command type */
 enum cam_hw_mgr_command {
 	CAM_HW_MGR_CMD_INTERNAL,
@@ -341,6 +351,7 @@ struct cam_hw_cmd_args {
  * @hw_open:                   Function pointer for HW init
  * @hw_close:                  Function pointer for HW deinit
  * @hw_flush:                  Function pointer for HW flush
+ * @hw_reset:                  Function pointer for HW reset
  *
  */
 struct cam_hw_mgr_intf {
@@ -361,6 +372,7 @@ struct cam_hw_mgr_intf {
 	int (*hw_open)(void *hw_priv, void *fw_download_args);
 	int (*hw_close)(void *hw_priv, void *hw_close_args);
 	int (*hw_flush)(void *hw_priv, void *hw_flush_args);
+	int (*hw_reset)(void *hw_priv, void *hw_reset_args);
 };
 
 #endif /* _CAM_HW_MGR_INTF_H_ */
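
A hedged usage sketch for the new hw_reset hook, with simplified stand-in types; the actual wiring is cam_ife_mgr_reset() registered in cam_ife_hw_mgr_init() and the call added in __cam_isp_ctx_flush_req_in_top_state() further down.

/* Simplified stand-in types; only the hw_reset plumbing is shown. */
struct cam_hw_reset_args { void *ctxt_to_hw_map; };

struct cam_hw_mgr_intf {
	void *hw_mgr_priv;
	int (*hw_reset)(void *hw_priv, void *hw_reset_args);
};

/* A context driver hands its HW context back to the HW manager for reset. */
static int ctx_reset_hw(struct cam_hw_mgr_intf *intf, void *hw_ctx)
{
	struct cam_hw_reset_args reset_args = { .ctxt_to_hw_map = hw_ctx };

	/* Only the IFE HW manager implements the hook in this change,
	 * so a caller outside ISP should check for NULL first. */
	if (!intf->hw_reset)
		return -1;
	return intf->hw_reset(intf->hw_mgr_priv, &reset_args);
}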

+ 2 - 0
drivers/cam_cust/cam_custom_context.c

@@ -874,6 +874,8 @@ static struct cam_ctx_ops
 		.irq_ops = NULL,
 		.pagefault_ops = NULL,
 	},
+	/* Flushed */
+	{},
 	/* Activated */
 	{
 		.ioctl_ops = {

+ 2 - 0
drivers/cam_fd/cam_fd_context.c

@@ -189,6 +189,8 @@ static struct cam_ctx_ops
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* Flushed */
+	{},
 	/* Activated */
 	{
 		.ioctl_ops = {

+ 2 - 0
drivers/cam_icp/cam_icp_context.c

@@ -250,6 +250,8 @@ static struct cam_ctx_ops
 		.irq_ops = __cam_icp_handle_buf_done_in_ready,
 		.pagefault_ops = cam_icp_context_dump_active_request,
 	},
+	/* Flushed */
+	{},
 	/* Activated */
 	{
 		.ioctl_ops = {},

+ 122 - 7
drivers/cam_isp/cam_isp_context.c

@@ -31,6 +31,9 @@ static struct cam_isp_ctx_debug isp_ctx_debug;
 static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
 	uint32_t buf_info);
 
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd);
+
 static void __cam_isp_ctx_update_state_monitor_array(
 	struct cam_isp_context *ctx_isp,
 	enum cam_isp_state_change_trigger trigger_type,
@@ -2072,13 +2075,15 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
 }
 
 static int __cam_isp_ctx_flush_req_in_top_state(
-	struct cam_context *ctx,
+	struct cam_context               *ctx,
 	struct cam_req_mgr_flush_request *flush_req)
 {
-	int rc = 0;
-	struct cam_isp_context *ctx_isp;
-	struct cam_hw_cmd_args hw_cmd_args;
-
+	int                               rc = 0;
+	struct cam_isp_context           *ctx_isp;
+	struct cam_isp_stop_args          stop_isp;
+	struct cam_hw_stop_args           stop_args;
+	struct cam_hw_reset_args          reset_args;
+	struct cam_hw_cmd_args            hw_cmd_args;
 
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
@@ -2087,7 +2092,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 		ctx->last_flush_req = flush_req->req_id;
 	}
 
-	CAM_DBG(CAM_ISP, "try to flush pending list");
+	CAM_DBG(CAM_ISP, "Flush pending list");
 	spin_lock_bh(&ctx->lock);
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
 	spin_unlock_bh(&ctx->lock);
@@ -2102,6 +2107,47 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 		rc = 0;
 	}
 
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		if (ctx->state <= CAM_CTX_READY) {
+			ctx->state = CAM_CTX_ACQUIRED;
+			goto end;
+		}
+
+		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+		stop_isp.stop_only = true;
+		stop_args.args = (void *)&stop_isp;
+		rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop_args);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
+				rc);
+
+		CAM_DBG(CAM_ISP, "Flush wait and active lists");
+		spin_lock_bh(&ctx->lock);
+		if (!list_empty(&ctx->wait_req_list))
+			rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
+				flush_req);
+
+		if (!list_empty(&ctx->active_req_list))
+			rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
+				flush_req);
+
+		ctx_isp->active_req_cnt = 0;
+		spin_unlock_bh(&ctx->lock);
+
+		reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
+			&reset_args);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);
+
+		ctx->state = CAM_CTX_FLUSHED;
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+		ctx_isp->init_received = false;
+	}
+
+end:
 	atomic_set(&ctx_isp->process_bubble, 0);
 	return rc;
 }
@@ -3454,6 +3500,55 @@ static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
 	return rc;
 }
 
+static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_start_stop_dev_cmd start_cmd;
+	struct cam_hw_cmd_args hw_cmd_args;
+	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx_isp->hw_acquired) {
+		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
+		return -EINVAL;
+	}
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+	if (rc)
+		return rc;
+
+	if (!ctx_isp->init_received) {
+		CAM_WARN(CAM_ISP,
+			"Received update packet in flushed state, skip start");
+		goto end;
+	}
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
+		return rc;
+	}
+
+	start_cmd.dev_handle = cmd->dev_handle;
+	start_cmd.session_handle = cmd->session_handle;
+	rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Failed to re-start HW after flush rc: %d", rc);
+
+end:
+	CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
+		ctx_isp->substate_activated);
+	return rc;
+}
+
 static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
 	struct cam_req_mgr_core_dev_link_setup *link)
 {
@@ -3543,7 +3638,11 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
 	start_isp.hw_config.priv  = &req_isp->hw_update_data;
 	start_isp.hw_config.init_packet = 1;
 	start_isp.hw_config.reapply = 0;
-	start_isp.start_only = false;
+
+	if (ctx->state == CAM_CTX_FLUSHED)
+		start_isp.start_only = true;
+	else
+		start_isp.start_only = false;
 
 	atomic_set(&ctx_isp->process_bubble, 0);
 	ctx_isp->frame_id = 0;
@@ -3966,6 +4065,22 @@ static struct cam_ctx_ops
 		.pagefault_ops = cam_isp_context_dump_active_request,
 		.dumpinfo_ops = cam_isp_context_info_dump,
 	},
+	/* Flushed */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+			.release_dev = __cam_isp_ctx_release_dev_in_activated,
+			.config_dev = __cam_isp_ctx_config_dev_in_flushed,
+			.release_hw = __cam_isp_ctx_release_hw_in_activated,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_ready,
+			.process_evt = __cam_isp_ctx_process_evt,
+		},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_isp_context_dump_active_request,
+		.dumpinfo_ops = cam_isp_context_info_dump,
+	},
 	/* Activated */
 	{
 		.ioctl_ops = {

+ 58 - 12
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -368,6 +368,11 @@ static void cam_ife_hw_mgr_stop_hw_res(
 		if (!isp_hw_res->hw_res[i])
 			continue;
 		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+
+		if (isp_hw_res->hw_res[i]->res_state !=
+			CAM_ISP_RESOURCE_STATE_STREAMING)
+			continue;
+
 		if (hw_intf->hw_ops.stop)
 			hw_intf->hw_ops.stop(hw_intf->hw_priv,
 				isp_hw_res->hw_res[i],
@@ -794,7 +799,9 @@ static int cam_ife_mgr_csid_stop_hw(
 	cnt = 0;
 	list_for_each_entry(hw_mgr_res, stop_list, list) {
 		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
-			if (!hw_mgr_res->hw_res[i])
+			if (!hw_mgr_res->hw_res[i] ||
+				(hw_mgr_res->hw_res[i]->res_state !=
+				CAM_ISP_RESOURCE_STATE_STREAMING))
 				continue;
 
 			isp_res = hw_mgr_res->hw_res[i];
@@ -2722,6 +2729,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 		cdm_acquire.handle);
 	ife_ctx->cdm_handle = cdm_acquire.handle;
 	ife_ctx->cdm_ops = cdm_acquire.ops;
+	atomic_set(&ife_ctx->cdm_done, 1);
 
 	acquire_hw_info =
 		(struct cam_isp_acquire_hw_info *)acquire_args->acquire_info;
@@ -2910,6 +2918,7 @@ static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
 		cdm_acquire.handle);
 	ife_ctx->cdm_handle = cdm_acquire.handle;
 	ife_ctx->cdm_ops = cdm_acquire.ops;
+	atomic_set(&ife_ctx->cdm_done, 1);
 
 	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
 
@@ -3623,13 +3632,6 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 	CAM_DBG(CAM_ISP, " Enter...ctx id:%d", ctx->ctx_index);
 	stop_isp = (struct cam_isp_stop_args    *)stop_args->args;
 
-	if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_IMMEDIATELY) &&
-		(stop_isp->stop_only)) {
-		CAM_ERR(CAM_ISP, "Invalid params hw_stop_cmd:%d stop_only:%d",
-			stop_isp->hw_stop_cmd, stop_isp->stop_only);
-		return -EPERM;
-	}
-
 	/* Set the csid halt command */
 	if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
 		csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
@@ -3663,7 +3665,7 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 
 	/* Stop the master CSID path first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
-		master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+		master_base_idx, csid_halt_type);
 
 	/* stop rest of the CSID paths  */
 	for (i = 0; i < ctx->num_base; i++) {
@@ -3673,7 +3675,7 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 			ctx->base[i].idx, i, master_base_idx);
 
 		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
-			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+			ctx->base[i].idx, csid_halt_type);
 	}
 
 	CAM_DBG(CAM_ISP, "Stopping master CID idx %d", master_base_idx);
@@ -3714,6 +3716,8 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 
 	cam_ife_mgr_pause_hw(ctx);
 
+	wait_for_completion(&ctx->config_done_complete);
+
 	if (stop_isp->stop_only)
 		goto end;
 
@@ -3945,6 +3949,7 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
 		goto safe_disable;
 	}
 
+start_only:
 	/* Apply initial configuration */
 	CAM_DBG(CAM_ISP, "Config HW");
 	rc = cam_ife_mgr_config_hw(hw_mgr_priv, &start_isp->hw_config);
@@ -3953,8 +3958,6 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
 		goto cdm_streamoff;
 	}
 
-start_only:
-
 	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
 		ctx->ctx_index);
 	/* start the IFE out devices */
@@ -4081,6 +4084,48 @@ static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args)
 	return -EPERM;
 }
 
+static int cam_ife_mgr_reset(void *hw_mgr_priv, void *hw_reset_args)
+{
+	struct cam_ife_hw_mgr            *hw_mgr = hw_mgr_priv;
+	struct cam_hw_reset_args         *reset_args = hw_reset_args;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	int                               rc = 0, i = 0;
+
+	if (!hw_mgr_priv || !hw_reset_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)reset_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Reset CSID and VFE");
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to reset CSID:%d rc: %d",
+				hw_mgr_res->res_id, rc);
+			goto end;
+		}
+	}
+
+	for (i = 0; i < ctx->num_base; i++) {
+		rc = cam_ife_mgr_reset_vfe_hw(hw_mgr, ctx->base[i].idx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to reset VFE:%d rc: %d",
+				ctx->base[i].idx, rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
 static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
 					void *release_hw_args)
 {
@@ -6807,6 +6852,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
+	hw_mgr_intf->hw_reset = cam_ife_mgr_reset;
 
 	if (iommu_hdl)
 		*iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;

+ 13 - 9
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c

@@ -657,10 +657,6 @@ static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
 	init_completion(complete);
 	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
 
-	/* Enable the Test gen before reset */
-	cam_io_w_mb(1,	csid_hw->hw_info->soc_info.reg_map[0].mem_base +
-		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
-
 	/* Reset the corresponding ife csid path */
 	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
 				reset_strb_addr);
@@ -675,10 +671,6 @@ static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
 			rc = -ETIMEDOUT;
 	}
 
-	/* Disable Test Gen after reset*/
-	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
-		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
-
 end:
 	return rc;
 
@@ -2101,7 +2093,7 @@ static int cam_ife_csid_disable_pxl_path(
 
 	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
 		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
-		/* configure Halt */
+		/* configure Halt for master */
 		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 		pxl_reg->csid_pxl_ctrl_addr);
 		val &= ~0x3;
@@ -2110,6 +2102,18 @@ static int cam_ife_csid_disable_pxl_path(
 			pxl_reg->csid_pxl_ctrl_addr);
 	}
 
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE &&
+		stop_cmd == CAM_CSID_HALT_IMMEDIATELY) {
+		/* configure Halt for slave */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_ctrl_addr);
+		val &= ~0xF;
+		val |= stop_cmd;
+		val |= (CSID_HALT_MODE_MASTER << 2);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_ctrl_addr);
+	}
+
 	return rc;
 }
 

+ 2 - 0
drivers/cam_lrme/cam_lrme_context.c

@@ -190,6 +190,8 @@ static struct cam_ctx_ops
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* Flushed */
+	{},
 	/* Activate */
 	{
 		.ioctl_ops = {