ソースを参照

msm: camera: common: Dynamically allocate flush request arrays

Change the flush request active and flush request pending arrays from
static arrays of size 20 to dynamically allocated arrays sized
according to the current pending and active request lists.

CRs-Fixed: 3046003
Change-Id: Iaa1179881d786359dbe9d37411a6ef3ae8038227
Signed-off-by: sokchetra eung <[email protected]>
sokchetra eung 3 年 前
コミット
fb6888d8bb
2 ファイル変更: 71 行追加, 25 行削除
  1. +68 −23  drivers/cam_core/cam_context_utils.c
  2. +3 −2    drivers/cam_core/cam_hw_mgr_intf.h

+ 68 - 23
drivers/cam_core/cam_context_utils.c

@@ -752,8 +752,9 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 {
 	struct cam_hw_flush_args flush_args;
 	struct list_head temp_list;
+	struct list_head *list;
 	struct cam_ctx_request *req;
-	uint32_t i;
+	uint32_t i, num_entries = 0;
 	int rc = 0;
 	bool free_req;
 
@@ -778,18 +779,30 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 
 	flush_args.num_req_pending = 0;
 	flush_args.last_flush_req = ctx->last_flush_req;
+	list_for_each(list, &temp_list) {
+		num_entries++;
+	}
+	if (num_entries) {
+		flush_args.flush_req_pending =
+			kcalloc(num_entries, sizeof(void *), GFP_KERNEL);
+		if (!flush_args.flush_req_pending) {
+			CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+				ctx->dev_name, ctx->ctx_id);
+			mutex_unlock(&ctx->sync_mutex);
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
 	while (true) {
-		spin_lock(&ctx->lock);
-		if (list_empty(&temp_list)) {
-			spin_unlock(&ctx->lock);
+
+		if (list_empty(&temp_list))
 			break;
-		}
 
 		req = list_first_entry(&temp_list,
 				struct cam_ctx_request, list);
 
 		list_del_init(&req->list);
-		spin_unlock(&ctx->lock);
 		req->flushed = 1;
 
 		flush_args.flush_req_pending[flush_args.num_req_pending++] =
@@ -843,14 +856,31 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 	}
 	mutex_unlock(&ctx->sync_mutex);
 
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+
 	if (ctx->hw_mgr_intf->hw_flush) {
 		flush_args.num_req_active = 0;
-		spin_lock(&ctx->lock);
-		list_for_each_entry(req, &ctx->active_req_list, list) {
+		num_entries = 0;
+		list_for_each(list, &temp_list) {
+			num_entries++;
+		}
+		if (num_entries) {
+			flush_args.flush_req_active =
+				kcalloc(num_entries, sizeof(void *), GFP_KERNEL);
+			if (!flush_args.flush_req_active) {
+				CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+					ctx->dev_name, ctx->ctx_id);
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+		list_for_each_entry(req, &temp_list, list) {
 			flush_args.flush_req_active[flush_args.num_req_active++]
 				= req->req_priv;
 		}
-		spin_unlock(&ctx->lock);
 
 		if (flush_args.num_req_pending || flush_args.num_req_active) {
 			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -860,27 +890,19 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 		}
 	}
 
-	INIT_LIST_HEAD(&temp_list);
-	spin_lock(&ctx->lock);
-	list_splice_init(&ctx->active_req_list, &temp_list);
-	INIT_LIST_HEAD(&ctx->active_req_list);
-	spin_unlock(&ctx->lock);
-
 	if (cam_debug_ctx_req_list & ctx->dev_id)
 		CAM_INFO(CAM_CTXT,
 			"[%s][%d] : Moving all requests from active_list to temp_list",
 			ctx->dev_name, ctx->ctx_id);
 
 	while (true) {
-		spin_lock(&ctx->lock);
-		if (list_empty(&temp_list)) {
-			spin_unlock(&ctx->lock);
+
+		if (list_empty(&temp_list))
 			break;
-		}
+
 		req = list_first_entry(&temp_list,
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
-		spin_unlock(&ctx->lock);
 
 		for (i = 0; i < req->num_out_map_entries; i++) {
 			if (req->out_map_entries[i].sync_id != -1) {
@@ -911,9 +933,13 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 				ctx->dev_name, ctx->ctx_id, req->request_id);
 	}
 
+	rc = 0;
 	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);
 
-	return 0;
+end:
+	kfree(flush_args.flush_req_active);
+	kfree(flush_args.flush_req_pending);
+	return rc;
 }
 
 int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -921,7 +947,7 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 {
 	struct cam_ctx_request *req = NULL;
 	struct cam_hw_flush_args flush_args;
-	uint32_t i;
+	uint32_t i = 0;
 	int32_t sync_id = 0;
 	int rc = 0;
 	bool free_req = false;
@@ -931,6 +957,13 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 	memset(&flush_args, 0, sizeof(flush_args));
 	flush_args.num_req_pending = 0;
 	flush_args.num_req_active = 0;
+	flush_args.flush_req_pending = kzalloc(sizeof(void *), GFP_KERNEL);
+	if (!flush_args.flush_req_pending) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
 	mutex_lock(&ctx->sync_mutex);
 	spin_lock(&ctx->lock);
 	list_for_each_entry(req, &ctx->pending_req_list, list) {
@@ -954,6 +987,13 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 
 	if (ctx->hw_mgr_intf->hw_flush) {
 		if (!flush_args.num_req_pending) {
+			flush_args.flush_req_active = kzalloc(sizeof(void *), GFP_KERNEL);
+			if (!flush_args.flush_req_active) {
+				CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+					ctx->dev_name, ctx->ctx_id);
+				rc = -ENOMEM;
+				goto end;
+			}
 			spin_lock(&ctx->lock);
 			list_for_each_entry(req, &ctx->active_req_list, list) {
 				if (req->request_id != cmd->req_id)
@@ -1027,9 +1067,14 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 			}
 		}
 	}
+
+	rc = 0;
 	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);
 
-	return 0;
+end:
+	kfree(flush_args.flush_req_active);
+	kfree(flush_args.flush_req_pending);
+	return rc;
 }
 
 int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,

+ 3 - 2
drivers/cam_core/cam_hw_mgr_intf.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_HW_MGR_INTF_H_
@@ -326,9 +327,9 @@ struct cam_hw_config_args {
 struct cam_hw_flush_args {
 	void                           *ctxt_to_hw_map;
 	uint32_t                        num_req_pending;
-	void                           *flush_req_pending[20];
+	void                          **flush_req_pending;
 	uint32_t                        num_req_active;
-	void                           *flush_req_active[20];
+	void                          **flush_req_active;
 	enum flush_type_t               flush_type;
 	uint32_t                        last_flush_req;
 };