View source

Merge "msm: camera: custom: Add support for bubble handling" into camera-kernel.lnx.4.0

Haritha Chintalapati, 4 years ago
Commit
b9c46ed8c0
2 files changed, 164 insertions(+), 26 deletions(-)
  1. 157 insertions, 25 deletions
      drivers/cam_cust/cam_custom_context.c
  2. 7 insertions, 1 deletion
      drivers/cam_cust/cam_custom_context.h

+ 157 - 25
drivers/cam_cust/cam_custom_context.c

@@ -181,11 +181,28 @@ static int __cam_custom_ctx_frame_done(
 			continue;
 			continue;
 		}
 		}
 
 
-		rc = cam_sync_signal(req_custom->fence_map_out[j].sync_id,
+		if (!req_custom->bubble_detected) {
+			rc = cam_sync_signal(
+				req_custom->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
-		if (rc)
-			CAM_ERR(CAM_CUSTOM, "Sync failed with rc = %d", rc);
+			if (rc)
+				CAM_ERR(CAM_CUSTOM,
+					"Sync failed with rc = %d", rc);
+		} else if (!req_custom->bubble_report) {
+			rc = cam_sync_signal(
+				req_custom->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR,
+				CAM_SYNC_ISP_EVENT_BUBBLE);
+			if (rc)
+				CAM_ERR(CAM_CUSTOM,
+					"Sync failed with rc = %d", rc);
+		} else {
+			req_custom->num_acked++;
+			CAM_DBG(CAM_CUSTOM, "frame done with bubble for %llu",
+				req->request_id);
+			continue;
+		}
 
 
 		req_custom->num_acked++;
 		req_custom->num_acked++;
 		req_custom->fence_map_out[j].sync_id = -1;
 		req_custom->fence_map_out[j].sync_id = -1;
@@ -203,15 +220,131 @@ static int __cam_custom_ctx_frame_done(
 
 
 	custom_ctx->active_req_cnt--;
 	custom_ctx->active_req_cnt--;
 	frame_done_req_id = req->request_id;
 	frame_done_req_id = req->request_id;
-	list_del_init(&req->list);
-	list_add_tail(&req->list, &ctx->free_req_list);
-	CAM_DBG(CAM_REQ,
-		"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
-		frame_done_req_id, custom_ctx->active_req_cnt, ctx->ctx_id);
+	if (req_custom->bubble_detected && req_custom->bubble_report) {
+		req_custom->num_acked = 0;
+		req_custom->bubble_detected = false;
+		list_del_init(&req->list);
+		if (frame_done_req_id <= ctx->last_flush_req) {
+			for (i = 0; i < req_custom->num_fence_map_out; i++)
+				rc = cam_sync_signal(
+					req_custom->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR,
+					CAM_SYNC_ISP_EVENT_BUBBLE);
+
+			list_add_tail(&req->list, &ctx->free_req_list);
+			atomic_set(&custom_ctx->process_bubble, 0);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
+				frame_done_req_id, custom_ctx->active_req_cnt,
+				ctx->ctx_id);
+		} else {
+			list_add(&req->list, &ctx->pending_req_list);
+			atomic_set(&custom_ctx->process_bubble, 0);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to pending list in ctx %u",
+				frame_done_req_id, ctx->ctx_id);
+		}
+	} else {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
+			frame_done_req_id,
+			custom_ctx->active_req_cnt,
+			ctx->ctx_id);
+	}
 
 
 	return rc;
 	return rc;
 }
 }
 
 
+/*
+ * __cam_custom_ctx_handle_bubble()
+ *
+ * Locate request @req_id in this context's wait list, then in its
+ * active list, and mark it bubbled (bubble_detected = true) so that
+ * __cam_custom_ctx_frame_done() diverts it back to the pending list
+ * for re-apply instead of signalling its out fences.
+ *
+ * Returns 0 once the request has been found and marked; -EINVAL when
+ * the request is absent from both lists, or when its bubble_report
+ * flag is clear (recovery was not requested for it at apply time).
+ */
+static int __cam_custom_ctx_handle_bubble(
+	struct cam_context *ctx, uint64_t req_id)
+{
+	int                              rc = -EINVAL;
+	bool                             found = false;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp;
+	struct cam_custom_dev_ctx_req   *req_custom;
+
+	/* First look for the request among those waiting to be applied. */
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->wait_req_list, list) {
+		if (req->request_id == req_id) {
+			req_custom =
+				(struct cam_custom_dev_ctx_req *)req->req_priv;
+			if (!req_custom->bubble_report) {
+				/* Recovery not requested: rc stays -EINVAL. */
+				CAM_DBG(CAM_CUSTOM,
+					"Skip bubble recovery for %llu",
+					req_id);
+				goto end;
+			}
+
+			req_custom->bubble_detected = true;
+			found = true;
+			/*
+			 * NOTE(review): no break after the match; request ids
+			 * are presumably unique per context, so the remaining
+			 * iterations are redundant -- confirm and consider
+			 * breaking out of the loop early.
+			 */
+			CAM_DBG(CAM_CUSTOM,
+				"Found bubbled req %llu in wait list",
+				req_id);
+		}
+	}
+
+	if (found) {
+		rc = 0;
+		goto end;
+	}
+
+	/* Not waiting: search the requests already applied to hardware. */
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		if (req->request_id == req_id) {
+			req_custom =
+				(struct cam_custom_dev_ctx_req *)req->req_priv;
+			if (!req_custom->bubble_report) {
+				/* Recovery not requested: rc stays -EINVAL. */
+				CAM_DBG(CAM_CUSTOM,
+					"Skip bubble recovery for %llu",
+					req_id);
+				goto end;
+			}
+
+			req_custom->bubble_detected = true;
+			found = true;
+			CAM_DBG(CAM_CUSTOM,
+				"Found bubbled req %llu in active list",
+				req_id);
+		}
+	}
+
+	if (found)
+		rc = 0;
+	else
+		CAM_ERR(CAM_CUSTOM,
+			"req %llu not found in wait or active list bubble recovery failed ctx: %u",
+			req_id, ctx->ctx_id);
+
+end:
+	return rc;
+}
+
+/*
+ * __cam_custom_ctx_handle_evt()
+ *
+ * CRM process_evt entry point for the custom context.  For a bubble
+ * error (CRM_KMD_ERR_BUBBLE) it marks the offending request via
+ * __cam_custom_ctx_handle_bubble() and latches process_bubble so that
+ * subsequent applies are rejected until the bubbled frame is consumed
+ * in __cam_custom_ctx_frame_done().
+ *
+ * Any other error type is unsupported and must NOT latch
+ * process_bubble: nothing would ever clear the flag for a non-bubble
+ * error, leaving every future apply failing with -EAGAIN.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int __cam_custom_ctx_handle_evt(
+	struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	int rc;
+	struct cam_custom_context *custom_ctx =
+		(struct cam_custom_context *) ctx->ctx_priv;
+
+	if (evt_data->u.error != CRM_KMD_ERR_BUBBLE) {
+		CAM_WARN(CAM_CUSTOM, "Unsupported error type %d",
+			evt_data->u.error);
+		/* Do not set process_bubble for errors we cannot recover. */
+		return 0;
+	}
+
+	rc = __cam_custom_ctx_handle_bubble(ctx, evt_data->req_id);
+	if (rc)
+		return rc;
+
+	CAM_DBG(CAM_CUSTOM, "Set bubble flag for req %llu in ctx %u",
+		evt_data->req_id, ctx->ctx_id);
+	atomic_set(&custom_ctx->process_bubble, 1);
+	return 0;
+}
+
 static struct cam_ctx_ops
 static struct cam_ctx_ops
 	cam_custom_ctx_activated_state_machine
 	cam_custom_ctx_activated_state_machine
 	[CAM_CUSTOM_CTX_ACTIVATED_MAX] = {
 	[CAM_CUSTOM_CTX_ACTIVATED_MAX] = {
@@ -487,6 +620,7 @@ static int __cam_custom_ctx_flush_req_in_top_state(
 	}
 	}
 
 
 end:
 end:
+	atomic_set(&custom_ctx->process_bubble, 0);
 	return rc;
 	return rc;
 }
 }
 
 
@@ -495,6 +629,8 @@ static int __cam_custom_ctx_flush_req_in_ready(
 	struct cam_req_mgr_flush_request *flush_req)
 	struct cam_req_mgr_flush_request *flush_req)
 {
 {
 	int rc = 0;
 	int rc = 0;
+	struct cam_custom_context *custom_ctx =
+		(struct cam_custom_context *) ctx->ctx_priv;
 
 
 	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
 	CAM_DBG(CAM_CUSTOM, "try to flush pending list");
 	spin_lock_bh(&ctx->lock);
 	spin_lock_bh(&ctx->lock);
@@ -505,6 +641,7 @@ static int __cam_custom_ctx_flush_req_in_ready(
 		ctx->state = CAM_CTX_ACQUIRED;
 		ctx->state = CAM_CTX_ACQUIRED;
 	spin_unlock_bh(&ctx->lock);
 	spin_unlock_bh(&ctx->lock);
 
 
+	atomic_set(&custom_ctx->process_bubble, 0);
 	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
 	CAM_DBG(CAM_CUSTOM, "Flush request in ready state. next state %d",
 		 ctx->state);
 		 ctx->state);
 	return rc;
 	return rc;
@@ -751,6 +888,14 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
 	struct cam_custom_context       *custom_ctx = NULL;
 	struct cam_custom_context       *custom_ctx = NULL;
 	struct cam_hw_config_args        cfg;
 	struct cam_hw_config_args        cfg;
 
 
+	if (atomic_read(&custom_ctx->process_bubble)) {
+		CAM_WARN(CAM_CUSTOM,
+			"ctx_id:%d Processing bubble cannot apply Request Id %llu",
+			ctx->ctx_id, apply->request_id);
+		rc = -EAGAIN;
+		goto end;
+	}
+
 	if (list_empty(&ctx->pending_req_list)) {
 	if (list_empty(&ctx->pending_req_list)) {
 		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
 		CAM_ERR(CAM_CUSTOM, "No available request for Apply id %lld",
 			apply->request_id);
 			apply->request_id);
@@ -779,7 +924,7 @@ static int __cam_custom_ctx_apply_req_in_activated_state(
 	}
 	}
 
 
 	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
 	req_custom = (struct cam_custom_dev_ctx_req *) req->req_priv;
-
+	req_custom->bubble_report = apply->report_if_bubble;
 	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
 	cfg.ctxt_to_hw_map = custom_ctx->hw_ctx;
 	cfg.request_id = req->request_id;
 	cfg.request_id = req->request_id;
 	cfg.hw_update_entries = req_custom->cfg;
 	cfg.hw_update_entries = req_custom->cfg;
@@ -1287,6 +1432,7 @@ static int __cam_custom_ctx_start_dev_in_ready(struct cam_context *ctx,
 
 
 	ctx_custom->frame_id = 0;
 	ctx_custom->frame_id = 0;
 	ctx_custom->active_req_cnt = 0;
 	ctx_custom->active_req_cnt = 0;
+	atomic_set(&ctx_custom->process_bubble, 0);
 	ctx_custom->substate_activated =
 	ctx_custom->substate_activated =
 		(req_custom->num_fence_map_out) ?
 		(req_custom->num_fence_map_out) ?
 		CAM_CUSTOM_CTX_ACTIVATED_APPLIED :
 		CAM_CUSTOM_CTX_ACTIVATED_APPLIED :
@@ -1349,21 +1495,6 @@ static int __cam_custom_ctx_unlink_in_activated(struct cam_context *ctx,
 	return rc;
 	return rc;
 }
 }
 
 
-static int __cam_custom_ctx_process_evt(struct cam_context *ctx,
-	struct cam_req_mgr_link_evt_data *link_evt_data)
-{
-	switch (link_evt_data->evt_type) {
-	case CAM_REQ_MGR_LINK_EVT_ERR:
-		/* Handle error/bubble related issues */
-		break;
-	default:
-		CAM_WARN(CAM_CUSTOM, "Unknown event from CRM");
-		break;
-	}
-
-	return 0;
-}
-
 static int __cam_custom_ctx_handle_irq_in_activated(void *context,
 static int __cam_custom_ctx_handle_irq_in_activated(void *context,
 	uint32_t evt_id, void *evt_data)
 	uint32_t evt_id, void *evt_data)
 {
 {
@@ -1537,6 +1668,7 @@ static struct cam_ctx_ops
 		},
 		},
 		.crm_ops = {
 		.crm_ops = {
 			.unlink = __cam_custom_ctx_unlink_in_ready,
 			.unlink = __cam_custom_ctx_unlink_in_ready,
+			.process_evt = __cam_custom_ctx_handle_evt,
 		},
 		},
 		.irq_ops = NULL,
 		.irq_ops = NULL,
 	},
 	},
@@ -1556,7 +1688,7 @@ static struct cam_ctx_ops
 			.notify_frame_skip =
 			.notify_frame_skip =
 				__cam_custom_ctx_apply_default_req,
 				__cam_custom_ctx_apply_default_req,
 			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
 			.flush_req = __cam_custom_ctx_flush_req_in_top_state,
-			.process_evt = __cam_custom_ctx_process_evt,
+			.process_evt = __cam_custom_ctx_handle_evt,
 		},
 		},
 		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
 		.irq_ops = __cam_custom_ctx_handle_irq_in_activated,
 		.pagefault_ops = NULL,
 		.pagefault_ops = NULL,

+ 7 - 1
drivers/cam_cust/cam_custom_context.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only
 /* SPDX-License-Identifier: GPL-2.0-only
  *
  *
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  */
  */
 
 
 #ifndef _CAM_CUSTOM_CONTEXT_H_
 #ifndef _CAM_CUSTOM_CONTEXT_H_
@@ -65,6 +65,8 @@ struct cam_custom_ctx_irq_ops {
  * @num_acked:             Count to track acked entried for output.
  * @num_acked:             Count to track acked entried for output.
  *                         If count equals the number of fence out, it means
  *                         If count equals the number of fence out, it means
  *                         the request has been completed.
  *                         the request has been completed.
+ * @bubble_report:         If bubble recovery is needed
+ * @bubble_detected:       request has bubbled
  * @hw_update_data:        HW update data for this request
  * @hw_update_data:        HW update data for this request
  *
  *
  */
  */
@@ -80,6 +82,8 @@ struct cam_custom_dev_ctx_req {
 						[CAM_CUSTOM_DEV_CTX_RES_MAX];
 						[CAM_CUSTOM_DEV_CTX_RES_MAX];
 	uint32_t                                 num_fence_map_in;
 	uint32_t                                 num_fence_map_in;
 	uint32_t                                 num_acked;
 	uint32_t                                 num_acked;
+	int32_t                                  bubble_report;
+	bool                                     bubble_detected;
 	struct cam_custom_prepare_hw_update_data hw_update_data;
 	struct cam_custom_prepare_hw_update_data hw_update_data;
 };
 };
 
 
@@ -95,6 +99,7 @@ struct cam_custom_dev_ctx_req {
  * @active_req_cnt: Counter for the active request
  * @active_req_cnt: Counter for the active request
  * @frame_id: Frame id tracking for the custom context
  * @frame_id: Frame id tracking for the custom context
  * @hw_acquired: Flag to indicate if HW is acquired for this context
  * @hw_acquired: Flag to indicate if HW is acquired for this context
+ * @process_bubble: If ctx currently processing bubble
  * @substate_actiavted: Current substate for the activated state.
  * @substate_actiavted: Current substate for the activated state.
  * @substate_machine: Custom substate machine for external interface
  * @substate_machine: Custom substate machine for external interface
  * @substate_machine_irq: Custom substate machine for irq handling
  * @substate_machine_irq: Custom substate machine for irq handling
@@ -113,6 +118,7 @@ struct cam_custom_context {
 	int64_t                        frame_id;
 	int64_t                        frame_id;
 	bool                           hw_acquired;
 	bool                           hw_acquired;
 	uint32_t                       substate_activated;
 	uint32_t                       substate_activated;
+	atomic_t                       process_bubble;
 	struct cam_ctx_ops            *substate_machine;
 	struct cam_ctx_ops            *substate_machine;
 	struct cam_custom_ctx_irq_ops *substate_machine_irq;
 	struct cam_custom_ctx_irq_ops *substate_machine_irq;
 	struct cam_ctx_request         req_base[CAM_CTX_REQ_MAX];
 	struct cam_ctx_request         req_base[CAM_CTX_REQ_MAX];