
Merge "msm: camera: req_mgr: Correct slot status on mode update" into camera-kernel.lnx.7.0

Camera Software Integration committed 1 year ago
commit d37cd292ea

+ 18 - 8
drivers/cam_isp/cam_isp_context.c

@@ -6853,6 +6853,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	struct cam_packet                *packet;
 	size_t                            remain_len = 0;
 	struct cam_hw_prepare_update_args cfg = {0};
+	struct cam_isp_prepare_hw_update_data *hw_update_data;
 	struct cam_req_mgr_add_request    add_req;
 	struct cam_isp_context           *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
@@ -6947,6 +6948,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 		goto free_req_and_buf_tracker_list;
 	}
 
+	hw_update_data = cfg.priv;
 	req_isp->num_cfg = cfg.num_hw_update_entries;
 	req_isp->num_fence_map_out = cfg.num_out_map_entries;
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
@@ -6955,6 +6957,8 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	req_isp->bubble_detected = false;
 	req_isp->cdm_reset_before_apply = false;
 	req_isp->hw_update_data.packet = packet;
+	req_isp->hw_update_data.num_exp = hw_update_data->num_exp;
+	req_isp->hw_update_data.mup_en = hw_update_data->mup_en;
 	req->pf_data.packet_handle = cmd->packet_handle;
 	req->pf_data.packet_offset = cmd->offset;
 	req->pf_data.req = req;
@@ -6969,7 +6973,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	}
 
 	CAM_DBG(CAM_ISP,
-		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d, ctx_idx: %u, link: 0x%x",
+		"packet req-id:%lld, opcode:%d, num_entry:%d, num_fence_out: %d, num_fence_in: %d, ctx: %u, link: 0x%x",
 		packet->header.request_id, req_isp->hw_update_data.packet_opcode_type,
 		req_isp->num_cfg, req_isp->num_fence_map_out, req_isp->num_fence_map_in,
 		ctx->ctx_id, ctx->link_hdl);
@@ -7011,9 +7015,12 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 			add_req.link_hdl = ctx->link_hdl;
 			add_req.dev_hdl  = ctx->dev_hdl;
 			add_req.req_id   = req->request_id;
+			add_req.num_exp = ctx_isp->last_num_exp;
 
-			if (ctx_isp->is_shdr_master && req_isp->hw_update_data.mup_en)
+			if (req_isp->hw_update_data.mup_en) {
 				add_req.num_exp = req_isp->hw_update_data.num_exp;
+				ctx_isp->last_num_exp = add_req.num_exp;
+			}
 			rc = ctx->ctx_crm_intf->add_req(&add_req);
 			if (rc) {
 				if (rc == -EBADR)
@@ -7455,6 +7462,7 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
 		goto free_res;
 	}
 
+	ctx_isp->last_num_exp = 0;
 	ctx_isp->support_consumed_addr =
 		(param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);
 	ctx_isp->is_tfe_shdr = (param.op_flags & CAM_IFE_CTX_SHDR_EN);
@@ -7520,9 +7528,9 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
 
 	trace_cam_context_state("ISP", ctx);
 	CAM_INFO(CAM_ISP,
-		"Acquire success:session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u",
+		"Acquire success:session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u is_shdr %d is_shdr_master %d",
 		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl,
-		param.hw_mgr_ctx_id);
+		param.hw_mgr_ctx_id, ctx_isp->is_tfe_shdr, ctx_isp->is_shdr_master);
 	kfree(acquire_hw_info);
 	return rc;
 
@@ -7628,6 +7636,7 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 	 * Set feature flag if applicable
 	 * custom hw is supported only on v2
 	 */
+	ctx_isp->last_num_exp = 0;
 	ctx_isp->custom_enabled =
 		(param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
 	ctx_isp->use_frame_header_ts =
@@ -7771,9 +7780,9 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 
 	trace_cam_context_state("ISP", ctx);
 	CAM_INFO(CAM_ISP,
-		"Acquire success: session_hdl 0x%xs ctx_type %d ctx %u link: 0x%x hw_mgr_ctx: %u",
+		"Acquire success: session_hdl 0x%xs ctx_type %d ctx %u link 0x%x hw_mgr_ctx %u is_shdr %d is_shdr_master %d",
 		ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id, ctx->link_hdl,
-		param.hw_mgr_ctx_id);
+		param.hw_mgr_ctx_id, ctx_isp->is_tfe_shdr, ctx_isp->is_shdr_master);
 	kfree(acquire_hw_info);
 	return rc;
 
@@ -7975,7 +7984,7 @@ static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
 	return rc;
 }
 
-static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+static int __cam_isp_ctx_get_dev_info(struct cam_context *ctx,
 	struct cam_req_mgr_device_info *dev_info)
 {
 	int rc = 0;
@@ -8988,7 +8997,7 @@ static struct cam_ctx_ops
 		.crm_ops = {
 			.link = __cam_isp_ctx_link_in_acquired,
 			.unlink = __cam_isp_ctx_unlink_in_acquired,
-			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+			.get_dev_info = __cam_isp_ctx_get_dev_info,
 			.process_evt = __cam_isp_ctx_process_evt,
 			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
 			.dump_req = __cam_isp_ctx_dump_in_top_state,
@@ -9009,6 +9018,7 @@ static struct cam_ctx_ops
 		},
 		.crm_ops = {
 			.unlink = __cam_isp_ctx_unlink_in_ready,
+			.get_dev_info = __cam_isp_ctx_get_dev_info,
 			.flush_req = __cam_isp_ctx_flush_req_in_ready,
 			.dump_req = __cam_isp_ctx_dump_in_top_state,
 		},
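
Note on the exposure-count handling above: ctx_isp->last_num_exp is cleared on both acquire paths and refreshed whenever a packet carries a mode update (mup_en), so every add_req reports a valid num_exp to the request manager even when the packet itself does not switch exposure modes, and dropping the is_shdr_master check lets both contexts on an SHDR link propagate the value. A minimal sketch of that flow; the helper name and signature are illustrative and not part of the driver, while the fields come from the hunks above:

	static void cam_isp_ctx_fill_num_exp(struct cam_isp_context *ctx_isp,
		struct cam_isp_ctx_req *req_isp,
		struct cam_req_mgr_add_request *add_req)
	{
		/* default: reuse the exposure count from the last mode update */
		add_req->num_exp = ctx_isp->last_num_exp;

		if (req_isp->hw_update_data.mup_en) {
			/* this packet switches modes: take and cache the new count */
			add_req->num_exp = req_isp->hw_update_data.num_exp;
			ctx_isp->last_num_exp = add_req->num_exp;
		}
	}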

+ 2 - 0
drivers/cam_isp/cam_isp_context.h

@@ -418,6 +418,7 @@ struct cam_isp_fcg_prediction_tracker {
  *                             frames and indicates which prediction should be used
  * @is_shdr:                   true, if usecase is shdr
  * @is_shdr_master:            Flag to indicate master context in shdr usecase
+ * @last_num_exp:              Last number of exposures
  *
  */
 struct cam_isp_context {
@@ -484,6 +485,7 @@ struct cam_isp_context {
 	struct cam_isp_fcg_prediction_tracker fcg_tracker;
 	bool                                  is_tfe_shdr;
 	bool                                  is_shdr_master;
+	uint32_t                              last_num_exp;
 };
 
 /**

+ 1 - 2
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -2102,8 +2102,7 @@ static int cam_tfe_mgr_acquire_get_unified_structure_v2(
 	in_port->lane_cfg        =  in->lane_cfg;
 	in_port->num_valid_vc_dt =  in->num_valid_vc_dt;
 
-	if (in_port->num_valid_vc_dt == 0 ||
-		in_port->num_valid_vc_dt >= CAM_ISP_TFE_VC_DT_CFG) {
+	if (in_port->num_valid_vc_dt == 0) {
 		CAM_ERR(CAM_ISP, "Invalid i/p arg invalid vc-dt: %d",
 			in->num_valid_vc_dt);
 		rc = -EINVAL;

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid770.h

@@ -278,6 +278,7 @@ static struct cam_tfe_csid_csi2_rx_reg_offset
 	.csi2_rst_done_shift_val                      = 27,
 	.csi2_irq_mask_all                            = 0xFFFFFFF,
 	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
 	.csi2_capture_long_pkt_en_shift               = 0,
 	.csi2_capture_short_pkt_en_shift              = 1,
 	.csi2_capture_cphy_pkt_en_shift               = 2,

+ 6 - 4
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_csid_hw/cam_tfe_csid_core.c

@@ -738,7 +738,7 @@ static int cam_tfe_csid_cid_reserve(struct cam_tfe_csid_hw *csid_hw,
 	/* CSID  CSI2 v1.1 supports 4 vc  */
 	for (i = 0; i < cid_reserv->in_port->num_valid_vc_dt; i++) {
 		if (cid_reserv->in_port->dt[i] > 0x3f ||
-			cid_reserv->in_port->vc[i] > 0x3) {
+			cid_reserv->in_port->vc[i] > 0x1f) {
 			CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
 				csid_hw->hw_intf->hw_idx,
 				cid_reserv->in_port->vc[i],
@@ -849,7 +849,7 @@ static int cam_tfe_csid_path_reserve(struct cam_tfe_csid_hw *csid_hw,
 	/* CSID  CSI2 v2.0 supports 4 vc */
 	for (i = 0; i < reserve->in_port->num_valid_vc_dt; i++) {
 		if (reserve->in_port->dt[i] > 0x3f ||
-			reserve->in_port->vc[i] > 0x3 ||
+			reserve->in_port->vc[i] > 0x1f ||
 			(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
 			CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
 				csid_hw->hw_intf->hw_idx,
@@ -1092,6 +1092,9 @@ static int cam_tfe_csid_enable_csi2(
 	/* rx cfg1 */
 	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
 
+	/* enable vc mode to allow vc values greater than 3 */
+	val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
 	/* enable packet ecc correction */
 	val |= 1;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
@@ -1675,7 +1678,7 @@ static int cam_tfe_csid_enable_pxl_path(
 		pxl_reg->csid_pxl_ctrl_addr);
 
 	CAM_DBG(CAM_ISP, "CSID:%d IPP Ctrl val: 0x%x",
-			csid_hw->hw_intf->hw_idx, val);
+		csid_hw->hw_intf->hw_idx, val);
 
 	/* Enable the required pxl path interrupts */
 	val = TFE_CSID_PATH_INFO_RST_DONE |
@@ -3862,7 +3865,6 @@ irqreturn_t cam_tfe_csid_irq(int irq_num, void *data)
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
 		csid_reg->cmn_reg->csid_irq_cmd_addr);
 
-
 	/* Software register reset complete*/
 	if (irq_status[TFE_CSID_IRQ_REG_TOP])
 		complete(&csid_hw->csid_top_complete);
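
Note on the CSI2 RX changes above: the VC validation limit is raised from 0x3 to 0x1f (5-bit virtual channels) and rx cfg1 now sets the VC-mode bit so the receiver accepts VC values above 3; the nearby "supports 4 vc" comments are leftovers from the old limit. With the TFE 770 offsets added in cam_tfe_csid770.h (MISR shift 6, VC-mode shift 2), the programmed value works out as below; other CSID variants may use different shift values:

	val  = 1 << 6;    /* csi2_misr_enable_shift_val            -> 0x40 */
	val |= 1 << 2;    /* csi2_vc_mode_shift_val, allow VC > 3  -> 0x44 */
	val |= 1;         /* packet ECC correction                 -> 0x45 */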

+ 4 - 4
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe770.h

@@ -137,8 +137,8 @@ static struct cam_tfe_camif_reg_data tfe770_camif_reg_data = {
 	.epoch1_irq_mask              = 0x00000008,
 	.eof_irq_mask                 = 0x00000002,
 	.reg_update_irq_mask          = 0x00000001,
-	.error_irq_mask0              = 0x00010100,
-	.error_irq_mask2              = 0x00000023,
+	.error_irq_mask0              = 0x00210100,
+	.error_irq_mask2              = 0x00000223,
 	.subscribe_irq_mask           = {
 		0x00000000,
 		0x00000007,
@@ -1445,9 +1445,9 @@ struct cam_tfe_hw_info cam_tfe770 = {
 		0x00000000,
 	},
 	.error_irq_mask = {
-		0x001F1F00,
+		0x003F1F00,
 		0x00000000,
-		0x000000FF,
+		0x000002FF,
 	},
 	.bus_reg_irq_mask = {
 		0x00000002,
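
For reference, each updated TFE 770 mask above enables one extra bit, bit 21 in the mask0 words and bit 9 in the mask2 words; the specific error conditions behind those bits are not named in this diff:

	0x00210100 == 0x00010100 | BIT(21)   /* camif error_irq_mask0 */
	0x00000223 == 0x00000023 | BIT(9)    /* camif error_irq_mask2 */
	0x003F1F00 == 0x001F1F00 | BIT(21)   /* error_irq_mask[0]     */
	0x000002FF == 0x000000FF | BIT(9)    /* error_irq_mask[2]     */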

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/tfe_hw/cam_tfe_core.c

@@ -2232,6 +2232,7 @@ int cam_tfe_top_reserve(void *device_priv,
 	top_priv = (struct cam_tfe_top_priv   *)device_priv;
 	args = (struct cam_tfe_acquire_args *)reserve_args;
 	acquire_args = &args->tfe_in;
+	top_priv->last_mup_val = 0;
 
 	for (i = 0; i < CAM_TFE_TOP_IN_PORT_MAX; i++) {
 		CAM_DBG(CAM_ISP, "i :%d res_id:%d state:%d", i,

+ 84 - 81
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -1197,7 +1197,6 @@ static void cam_req_mgr_reconfigure_link(struct cam_req_mgr_core_link *link,
 		if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master) {
 			dev->is_active = is_active;
 			tbl = dev->pd_tbl;
-
 			if (is_active) {
 				tbl->dev_mask |= (1 << dev->dev_bit);
 			} else {
@@ -1379,7 +1378,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 	/* For regular send requests */
 	for (i = 0; i < link->num_devs; i++) {
 		dev = &link->l_dev[i];
-		if (dev && dev->is_active) {
+		if (dev) {
 			pd = dev->dev_info.p_delay;
 			if (pd >= CAM_PIPELINE_DELAY_MAX) {
 				CAM_WARN(CAM_CRM, "pd %d greater than max",
@@ -1390,10 +1389,15 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 			if (!(dev->dev_info.trigger & trigger))
 				continue;
 
+			idx = apply_data[pd].idx;
+			slot = &dev->pd_tbl->slot[idx];
+
 			if (dev->dev_info.trigger_on && !dev->dev_info.is_shdr_master &&
-				dev->dev_info.mode_switch_req == apply_data[pd].req_id &&
-				link->wait_for_dual_trigger)
+				slot->ops.skip_isp_apply) {
+				CAM_DBG(CAM_CRM, "Skip slave switch req %d apply %lld",
+					dev->dev_info.mode_switch_req, apply_data[pd].req_id);
 				continue;
+			}
 
 			if (apply_data[pd].skip_idx ||
 				(apply_data[pd].req_id < 0)) {
@@ -1419,8 +1423,6 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 			apply_req.dev_hdl = dev->dev_hdl;
 			apply_req.request_id =
 				apply_data[pd].req_id;
-			idx = apply_data[pd].idx;
-			slot = &dev->pd_tbl->slot[idx];
 			apply_req.report_if_bubble =
 				in_q->slot[idx].recover;
 
@@ -1488,6 +1490,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 				else if (apply_req.dual_trigger_status ==
 					CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE)
 					link->wait_for_dual_trigger = false;
+
 				if (prev_dual_trigger_status != link->wait_for_dual_trigger)
 					cam_req_mgr_reconfigure_link(
 						link, dev, link->wait_for_dual_trigger);
@@ -3360,38 +3363,6 @@ end:
 	return rc;
 }
 
-static void cam_req_mgr_handle_exposure_change(
-	struct cam_req_mgr_core_link        *link,
-	struct cam_req_mgr_add_request      *add_req)
-{
-	int i = 0;
-	struct cam_req_mgr_connected_device *device = NULL;
-
-	if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
-		!link->wait_for_dual_trigger) {
-		for (i = 0; i < link->num_devs; i++) {
-			device = &link->l_dev[i];
-
-			if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
-				continue;
-
-			device->is_active = true;
-			device->dev_info.mode_switch_req = 0;
-		}
-	} else if (add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE &&
-		link->wait_for_dual_trigger) {
-
-		for (i = 0; i < link->num_devs; i++) {
-			device = &link->l_dev[i];
-
-			if (!device->dev_info.trigger_on || device->dev_info.is_shdr_master)
-				continue;
-
-			device->dev_info.mode_switch_req = add_req->req_id;
-		}
-	}
-}
-
 /**
  * cam_req_mgr_process_add_req()
  *
@@ -3409,6 +3380,7 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
 	struct cam_req_mgr_add_request      *add_req = NULL;
 	struct cam_req_mgr_core_link        *link = NULL;
 	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_connected_device *dev_l = NULL;
 	struct cam_req_mgr_req_tbl          *tbl = NULL;
 	struct cam_req_mgr_tbl_slot         *slot = NULL;
 	struct crm_task_payload             *task_data = NULL;
@@ -3461,6 +3433,7 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
 
 	link_slot = &link->req.in_q->slot[idx];
 	slot = &tbl->slot[idx];
+	slot->ops.skip_isp_apply = false;
 
 	if ((add_req->skip_at_sof & 0xFF) > slot->inject_delay_at_sof) {
 		slot->inject_delay_at_sof = (add_req->skip_at_sof & 0xFF);
@@ -3565,9 +3538,9 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
 	slot->state = CRM_REQ_STATE_PENDING;
 	slot->req_ready_map |= BIT(device->dev_bit);
 
-	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x tbl mask %x",
 		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
-		slot->req_ready_map);
+		slot->req_ready_map, tbl->dev_mask);
 
 	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
 
@@ -3589,23 +3562,36 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
 		return rc;
 	}
 
-	if (!device->is_active && !device->dev_info.is_shdr_master &&
-		!link->wait_for_dual_trigger) {
-		device->is_active = true;
+	if (device->dev_info.trigger_on &&
+		add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE) {
+		for (i = 0; i < link->num_devs; i++) {
+			dev_l = &link->l_dev[i];
+			if (dev_l->dev_info.trigger_on && dev_l->dev_hdl != add_req->dev_hdl) {
+				slot->ops.skip_isp_apply = true;
+				slot->req_ready_map |= (1 << dev_l->dev_bit);
+				if (slot->req_ready_map == tbl->dev_mask) {
+					slot->state = CRM_REQ_STATE_READY;
+					CAM_DBG(CAM_REQ,
+						"SHDR link %x idx %d req_id %lld pd %d SLOT READY",
+						link->link_hdl, idx, add_req->req_id, tbl->pd);
+				}
+				break;
+			}
+		}
+	}
+
+	if (device->dev_info.trigger_on &&
+		add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
+		!device->dev_info.is_shdr_master) {
+		tbl->dev_mask |= (1 << device->dev_bit);
 		if (slot->req_ready_map == tbl->dev_mask) {
+			slot->state = CRM_REQ_STATE_READY;
 			CAM_DBG(CAM_REQ,
-				"link 0x%x idx %d req_id %lld pd %d SLOT READY",
+				"SHDR link 0x%x idx %d req_id %lld pd %d SLOT READY",
 				link->link_hdl, idx, add_req->req_id, tbl->pd);
-			slot->state = CRM_REQ_STATE_READY;
 		}
-
-		mutex_unlock(&link->req.lock);
-		goto end;
 	}
 
-	if (device->dev_info.is_shdr_master)
-		cam_req_mgr_handle_exposure_change(link, add_req);
-
 	mutex_unlock(&link->req.lock);
 end:
 	return rc;
@@ -4070,6 +4056,8 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
 	dev_req->trigger_eof = add_req->trigger_eof;
 	dev_req->skip_at_sof = add_req->skip_at_sof;
 	dev_req->skip_at_eof = add_req->skip_at_eof;
+	dev_req->num_exp = add_req->num_exp;
+
 	if (dev_req->trigger_eof) {
 		atomic_inc(&link->eof_event_cnt);
 		CAM_DBG(CAM_REQ, "Req_id: %llu, eof_event_cnt: %d",
@@ -4469,12 +4457,11 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 {
 	int                                     rc = 0, i = 0, num_devices = 0;
 	struct cam_req_mgr_core_dev_link_setup  link_data;
-	struct cam_req_mgr_connected_device    *dev, *master_dev = NULL, *tmp_dev;
+	struct cam_req_mgr_connected_device    *dev;
 	struct cam_req_mgr_req_tbl             *pd_tbl;
 	enum cam_pipeline_delay                 max_delay;
 	enum cam_modeswitch_delay               max_modeswitch;
 	uint32_t num_trigger_devices = 0;
-	int32_t master_dev_idx = -1;
 
 	if (link_info->version == VERSION_1) {
 		if (link_info->u.link_info_v1.num_devices >
@@ -4526,11 +4513,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 		dev->dev_info.dev_hdl = dev->dev_hdl;
 		rc = dev->ops->get_dev_info(&dev->dev_info);
 
-		if (dev->dev_info.is_shdr_master) {
-			master_dev = dev;
-			master_dev_idx = i;
-		}
-
 		trace_cam_req_mgr_connect_device(link, &dev->dev_info);
 		if (link_info->version == VERSION_1)
 			CAM_DBG(CAM_CRM,
@@ -4579,11 +4561,8 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 				max_modeswitch = dev->dev_info.m_delay;
 		}
 
-		if (dev->dev_info.trigger_on) {
+		if (dev->dev_info.trigger_on)
 			num_trigger_devices++;
-			if (dev->dev_info.is_shdr)
-				link->is_shdr = true;
-		}
 
 		dev->is_active = true;
 	}
@@ -4601,26 +4580,8 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 	link_data.crm_cb = &cam_req_mgr_ops;
 	link_data.max_delay = max_delay;
 	link_data.mode_switch_max_delay = max_modeswitch;
-	if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS) {
+	if (num_trigger_devices == CAM_REQ_MGR_MAX_TRIGGERS)
 		link->dual_trigger = true;
-		link->wait_for_dual_trigger = true;
-	}
-
-	if (link->dual_trigger && master_dev) {
-		for (i = 0; i < num_devices; i++) {
-			dev = &link->l_dev[i];
-
-			if (dev->dev_info.trigger_on) {
-				if (dev->dev_hdl == master_dev->dev_hdl)
-					continue;
-				if (master_dev_idx < i) {
-					tmp_dev = master_dev;
-					master_dev = dev;
-					dev = tmp_dev;
-				}
-			}
-		}
-	}
 
 	num_trigger_devices = 0;
 	for (i = 0; i < num_devices; i++) {
@@ -5495,6 +5456,45 @@ end:
 	return rc;
 }
 
+int cam_req_mgr_rearrange_devs(
+	struct cam_req_mgr_core_link       *link)
+{
+	int i, rc = 0;
+	uint32_t  master_dev_idx = 0, slave_dev_idx = 0;
+	struct cam_req_mgr_connected_device    *dev, tmp_dev;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+
+		if (!dev->dev_info.trigger_on)
+			continue;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+		if (rc) {
+			CAM_ERR(CAM_CRM, "Get dev info failed link %x dev %x",
+				link->link_hdl, dev->dev_hdl);
+			continue;
+		}
+
+		if (dev->dev_info.is_shdr_master)
+			master_dev_idx = i;
+		else
+			slave_dev_idx = i;
+	}
+
+	if (master_dev_idx < slave_dev_idx) {
+		tmp_dev = link->l_dev[master_dev_idx];
+		link->l_dev[master_dev_idx] = link->l_dev[slave_dev_idx];
+		link->l_dev[slave_dev_idx] = tmp_dev;
+	}
+
+	link->is_shdr = true;
+	link->wait_for_dual_trigger = true;
+	CAM_DBG(CAM_CRM, "link hdl %x wait for dual triger %d",
+		link->link_hdl, link->wait_for_dual_trigger);
+
+	return rc;
+}
+
 int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
 {
 	int                                 rc = 0, i;
@@ -5547,6 +5547,9 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
 					link->link_hdl);
 				rc = -EFAULT;
 			}
+
+			if (link->dual_trigger)
+				rc = cam_req_mgr_rearrange_devs(link);
 			/* Wait for the streaming of sync link */
 			link->initial_skip = true;
 			/* Pause the timer before sensor stream on */
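
Summary of the corrected slot handling above: when a trigger device adds a request that carries CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE, the slot is marked skip_isp_apply and the peer trigger device's bit is pre-set in req_ready_map, so the slot can reach READY without the inactive device ever adding the request, and __cam_req_mgr_send_req() skips applying that slot to it (the old per-device is_active gating in send_req is dropped in favour of this per-slot flag). When a non-master trigger device later adds a CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE request, its bit is restored in tbl->dev_mask so readiness again requires both devices. A condensed sketch of that logic, with locking and error paths omitted and other_trigger_dev standing in for the lookup of the peer trigger device in link->l_dev[]:

	if (device->dev_info.trigger_on &&
		add_req->num_exp == CAM_REQ_DUAL_TRIGGER_ONE_EXPOSURE) {
		/* single-exposure mode: count the peer trigger device as done
		 * and tell send_req not to apply this slot to it
		 */
		slot->ops.skip_isp_apply = true;
		slot->req_ready_map |= (1 << other_trigger_dev->dev_bit);
	} else if (device->dev_info.trigger_on &&
		add_req->num_exp == CAM_REQ_DUAL_TRIGGER_TWO_EXPOSURE &&
		!device->dev_info.is_shdr_master) {
		/* back to dual exposure: the slave counts toward readiness again */
		tbl->dev_mask |= (1 << device->dev_bit);
	}

	if (slot->req_ready_map == tbl->dev_mask)
		slot->state = CRM_REQ_STATE_READY;

At stream-on, cam_req_mgr_rearrange_devs() re-reads dev info for the trigger devices, swaps them if needed so the SHDR master ends up at the higher index in l_dev[], and sets is_shdr and wait_for_dual_trigger; this takes over the master/slave reordering removed from __cam_req_mgr_setup_link_info(), while the per-slot flags supersede the deleted cam_req_mgr_handle_exposure_change().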

+ 5 - 0
drivers/cam_req_mgr/cam_req_mgr_core.h

@@ -244,11 +244,16 @@ struct cam_req_mgr_apply {
  * @num_dev         : Number of devices need to be applied at this trigger point
  * @dev_hdl         : Device handle who requested for special ops
  * @apply_at_eof    : Boolean Identifier for request to be applied at EOF
+ * @is_applied      : Flag to indicate the request was already applied to the
+ *                    device in a previous frame
+ * @skip_isp_apply  : Flag to indicate apply should be skipped for the ISP device
  */
 struct crm_tbl_slot_special_ops {
 	uint32_t num_dev;
 	int32_t dev_hdl[MAX_DEV_FOR_SPECIAL_OPS];
 	bool apply_at_eof;
+	bool is_applied;
+	bool skip_isp_apply;
 };
 
 /**