Pārlūkot izejas kodu

msm: camera: isp: Add support for flush in offline IFE

Add flush device ioctl handler in ISP top state machine to translate
the flush command into equivalent CRM flush request command. Also add
support for flushing offline IFE workqueue.

CRs-Fixed: 2939523
Change-Id: I79a6b6bca241b954e06f78e8a05e55907f46aa59
Signed-off-by: Anand Ravi <[email protected]>
Anand Ravi 4 gadi atpakaļ
vecāks
revīzija
25ee65107a

+ 47 - 8
drivers/cam_isp/cam_isp_context.c

@@ -4734,6 +4734,38 @@ static struct cam_ctx_ops
 	},
 };
 
+static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	struct cam_isp_context *ctx_isp = ctx->ctx_priv;
+	struct cam_req_mgr_flush_request flush_req;
+
+	if (!ctx_isp->offline_context) {
+		CAM_ERR(CAM_ISP, "flush dev only supported in offline context");
+		return -EINVAL;
+	}
+
+	flush_req.type = (cmd->flush_type == CAM_FLUSH_TYPE_ALL) ? CAM_REQ_MGR_FLUSH_TYPE_ALL :
+			CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ;
+	flush_req.req_id = cmd->req_id;
+
+	CAM_DBG(CAM_ISP, "offline flush (type:%u, req:%lu)", flush_req.type, flush_req.req_id);
+
+	switch (ctx->state) {
+	case CAM_CTX_ACQUIRED:
+	case CAM_CTX_ACTIVATED:
+		return __cam_isp_ctx_flush_req_in_top_state(ctx, &flush_req);
+	case CAM_CTX_READY:
+		return __cam_isp_ctx_flush_req_in_ready(ctx, &flush_req);
+	default:
+		CAM_ERR(CAM_ISP, "flush dev in wrong state: %d", ctx->state);
+		return -EINVAL;
+	}
+
+	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL)
+		cam_req_mgr_workq_flush(ctx_isp->workq);
+}
+
 static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
 {
 	kfree(ctx->out_map_entries);
@@ -4800,6 +4832,7 @@ static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
 	spin_unlock_bh(&ctx->lock);
 	__cam_isp_ctx_free_mem_hw_entries(ctx);
+	cam_req_mgr_workq_destroy(&ctx_isp->workq);
 	ctx->state = CAM_CTX_ACQUIRED;
 
 	trace_cam_context_state("ISP", ctx);
@@ -5034,11 +5067,16 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 				ctx->state);
 		}
 	} else {
+		if ((ctx->state == CAM_CTX_FLUSHED) || (ctx->state < CAM_CTX_READY)) {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Received update req %lld in wrong state:%d",
+				req->request_id, ctx->state);
+			goto put_ref;
+		}
+
 		if (ctx_isp->offline_context) {
 			__cam_isp_ctx_enqueue_request_in_order(ctx, req);
-		} else if ((ctx->state != CAM_CTX_FLUSHED) &&
-			(ctx->state >= CAM_CTX_READY) &&
-			ctx->ctx_crm_intf->add_req) {
+		} else if (ctx->ctx_crm_intf->add_req) {
 			memset(&add_req, 0, sizeof(add_req));
 			add_req.link_hdl = ctx->link_hdl;
 			add_req.dev_hdl  = ctx->dev_hdl;
@@ -5052,10 +5090,8 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 					ctx, req);
 			}
 		} else {
-			rc = -EINVAL;
-			CAM_ERR(CAM_ISP,
-				"Recevied update req %lld in wrong state:%d",
-				req->request_id, ctx->state);
+			CAM_ERR(CAM_ISP, "Unable to add request: req id=%llu", req->request_id);
+			rc = -ENODEV;
 		}
 	}
 	if (rc)
@@ -6033,7 +6069,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 			"Notify CRM about device stop ctx %u link 0x%x",
 			ctx->ctx_id, ctx->link_hdl);
 		ctx->ctx_crm_intf->notify_stop(&notify);
-	} else
+	} else if (!ctx_isp->offline_context)
 		CAM_ERR(CAM_ISP, "cb not present");
 
 	while (!list_empty(&ctx->pending_req_list)) {
@@ -6460,6 +6496,7 @@ static struct cam_ctx_ops
 			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
 			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
 			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
+			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
 			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
 		},
 		.crm_ops = {
@@ -6479,6 +6516,7 @@ static struct cam_ctx_ops
 			.start_dev = __cam_isp_ctx_start_dev_in_ready,
 			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
 			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
 			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
 		},
 		.crm_ops = {
@@ -6512,6 +6550,7 @@ static struct cam_ctx_ops
 			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
 			.release_dev = __cam_isp_ctx_release_dev_in_activated,
 			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+			.flush_dev = __cam_isp_ctx_flush_dev_in_top_state,
 			.release_hw = __cam_isp_ctx_release_hw_in_activated,
 		},
 		.crm_ops = {

+ 18 - 4
drivers/cam_req_mgr/cam_req_mgr_workq.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #include "cam_req_mgr_workq.h"
@@ -63,6 +63,18 @@ static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
 	WORKQ_RELEASE_LOCK(workq, flags);
 }
 
/*
 * cam_req_mgr_workq_flush() - drain the workqueue of pending tasks.
 *
 * While workq->flush is set, cam_req_mgr_workq_enqueue_task() puts new
 * tasks straight back into the free pool and cam_req_mgr_process_workq()
 * dequeues but does not run still-pending tasks; cancel_work_sync() then
 * sleeps until any already-running work item completes. The flag is
 * cleared afterwards so the workqueue remains usable.
 */
void cam_req_mgr_workq_flush(struct cam_req_mgr_core_workq *workq)
{
	if (!workq) {
		CAM_ERR(CAM_CRM, "workq is null");
		return;
	}

	/*
	 * Set flush before cancel_work_sync() so a concurrently-running
	 * worker observes it and skips the tasks it has yet to process.
	 */
	atomic_set(&workq->flush, 1);
	cancel_work_sync(&workq->work);
	atomic_set(&workq->flush, 0);
}
+
 /**
  * cam_req_mgr_process_task() - Process the enqueued task
  * @task: pointer to task workq thread shall process
@@ -111,7 +123,8 @@ void cam_req_mgr_process_workq(struct work_struct *w)
 			atomic_sub(1, &workq->task.pending_cnt);
 			list_del_init(&task->entry);
 			WORKQ_RELEASE_LOCK(workq, flags);
-			cam_req_mgr_process_task(task);
+			if (!unlikely(atomic_read(&workq->flush)))
+				cam_req_mgr_process_task(task);
 			CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
 				task, atomic_read(&workq->task.free_cnt));
 			WORKQ_ACQUIRE_LOCK(workq, flags);
@@ -140,9 +153,9 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 		goto end;
 	}
 
-	if (task->cancel == 1) {
+	if (task->cancel == 1 || atomic_read(&workq->flush)) {
 		cam_req_mgr_workq_put_task(task);
-		CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
+		CAM_INFO(CAM_CRM, "task aborted and queued back to pool");
 		rc = 0;
 		goto end;
 	}
@@ -215,6 +228,7 @@ int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
 		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
 			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		atomic_set(&crm_workq->flush, 0);
 		crm_workq->in_irq = in_irq;
 		crm_workq->task.num_task = num_tasks;
 		crm_workq->task.pool = kcalloc(crm_workq->task.num_task,

+ 10 - 1
drivers/cam_req_mgr/cam_req_mgr_workq.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_REQ_MGR_WORKQ_H_
@@ -76,6 +76,7 @@ struct crm_workq_task {
  * @job         : workqueue internal job struct
  * @lock_bh     : lock for task structs
  * @in_irq      : set true if workque can be used in irq context
+ * @flush       : used to track if flush has been called on workqueue
  * task -
  * @lock        : Current task's lock handle
  * @pending_cnt : # of tasks left in queue
@@ -92,6 +93,7 @@ struct cam_req_mgr_core_workq {
 	spinlock_t                 lock_bh;
 	uint32_t                   in_irq;
 	ktime_t                    workq_scheduled_ts;
+	atomic_t                   flush;
 
 	/* tasks */
 	struct {
@@ -166,4 +168,11 @@ void cam_req_mgr_thread_switch_delay_detect(
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq);
 
+/**
+ * cam_req_mgr_workq_flush()
+ * @brief: Flushes the workqueue: rejects newly enqueued tasks, discards
+ *         still-pending tasks, and sleeps until any in-flight task completes.
+ * @workq: pointer to worker data struct
+ */
+void cam_req_mgr_workq_flush(struct cam_req_mgr_core_workq *workq);
+
 #endif