@@ -752,8 +752,9 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 {
         struct cam_hw_flush_args flush_args;
         struct list_head temp_list;
+        struct list_head *list;
         struct cam_ctx_request *req;
-        uint32_t i;
+        uint32_t i, num_entries = 0;
         int rc = 0;
         bool free_req;
 
@@ -778,18 +779,30 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
         flush_args.num_req_pending = 0;
         flush_args.last_flush_req = ctx->last_flush_req;
 
+        list_for_each(list, &temp_list) {
+                num_entries++;
+        }
+        if (num_entries) {
+                flush_args.flush_req_pending =
+                        kcalloc(num_entries, sizeof(void *), GFP_KERNEL);
+                if (!flush_args.flush_req_pending) {
+                        CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+                                ctx->dev_name, ctx->ctx_id);
+                        mutex_unlock(&ctx->sync_mutex);
+                        rc = -ENOMEM;
+                        goto end;
+                }
+        }
+
         while (true) {
-                spin_lock(&ctx->lock);
-                if (list_empty(&temp_list)) {
-                        spin_unlock(&ctx->lock);
+
+                if (list_empty(&temp_list))
                         break;
-                }
 
                 req = list_first_entry(&temp_list,
                                 struct cam_ctx_request, list);
 
                 list_del_init(&req->list);
-                spin_unlock(&ctx->lock);
                 req->flushed = 1;
 
                 flush_args.flush_req_pending[flush_args.num_req_pending++] =
@@ -843,14 +856,31 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
         }
         mutex_unlock(&ctx->sync_mutex);
 
+        INIT_LIST_HEAD(&temp_list);
+        spin_lock(&ctx->lock);
+        list_splice_init(&ctx->active_req_list, &temp_list);
+        spin_unlock(&ctx->lock);
+
         if (ctx->hw_mgr_intf->hw_flush) {
                 flush_args.num_req_active = 0;
-                spin_lock(&ctx->lock);
-                list_for_each_entry(req, &ctx->active_req_list, list) {
+                num_entries = 0;
+                list_for_each(list, &temp_list) {
+                        num_entries++;
+                }
+                if (num_entries) {
+                        flush_args.flush_req_active =
+                                kcalloc(num_entries, sizeof(void *), GFP_KERNEL);
+                        if (!flush_args.flush_req_active) {
+                                CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+                                        ctx->dev_name, ctx->ctx_id);
+                                rc = -ENOMEM;
+                                goto end;
+                        }
+                }
+                list_for_each_entry(req, &temp_list, list) {
                         flush_args.flush_req_active[flush_args.num_req_active++]
                                 = req->req_priv;
                 }
-                spin_unlock(&ctx->lock);
 
                 if (flush_args.num_req_pending || flush_args.num_req_active) {
                         flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -860,27 +890,19 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
                 }
         }
 
-        INIT_LIST_HEAD(&temp_list);
-        spin_lock(&ctx->lock);
-        list_splice_init(&ctx->active_req_list, &temp_list);
-        INIT_LIST_HEAD(&ctx->active_req_list);
-        spin_unlock(&ctx->lock);
-
         if (cam_debug_ctx_req_list & ctx->dev_id)
                 CAM_INFO(CAM_CTXT,
                         "[%s][%d] : Moving all requests from active_list to temp_list",
                         ctx->dev_name, ctx->ctx_id);
 
         while (true) {
-                spin_lock(&ctx->lock);
-                if (list_empty(&temp_list)) {
-                        spin_unlock(&ctx->lock);
+
+                if (list_empty(&temp_list))
                         break;
-                }
+
                 req = list_first_entry(&temp_list,
                                 struct cam_ctx_request, list);
                 list_del_init(&req->list);
-                spin_unlock(&ctx->lock);
 
                 for (i = 0; i < req->num_out_map_entries; i++) {
                         if (req->out_map_entries[i].sync_id != -1) {
@@ -911,9 +933,13 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
                                 ctx->dev_name, ctx->ctx_id, req->request_id);
         }
 
+        rc = 0;
         CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);
 
-        return 0;
+end:
+        kfree(flush_args.flush_req_active);
+        kfree(flush_args.flush_req_pending);
+        return rc;
 }
 
 int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -921,7 +947,7 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 {
         struct cam_ctx_request *req = NULL;
         struct cam_hw_flush_args flush_args;
-        uint32_t i;
+        uint32_t i = 0;
         int32_t sync_id = 0;
         int rc = 0;
         bool free_req = false;
@@ -931,6 +957,13 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
         memset(&flush_args, 0, sizeof(flush_args));
         flush_args.num_req_pending = 0;
         flush_args.num_req_active = 0;
+        flush_args.flush_req_pending = kzalloc(sizeof(void *), GFP_KERNEL);
+        if (!flush_args.flush_req_pending) {
+                CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+                        ctx->dev_name, ctx->ctx_id);
+                rc = -ENOMEM;
+                goto end;
+        }
         mutex_lock(&ctx->sync_mutex);
         spin_lock(&ctx->lock);
         list_for_each_entry(req, &ctx->pending_req_list, list) {
@@ -954,6 +987,13 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 
         if (ctx->hw_mgr_intf->hw_flush) {
                 if (!flush_args.num_req_pending) {
+                        flush_args.flush_req_active = kzalloc(sizeof(void *), GFP_KERNEL);
+                        if (!flush_args.flush_req_active) {
+                                CAM_ERR(CAM_CTXT, "[%s][%d] : Flush array memory alloc fail",
+                                        ctx->dev_name, ctx->ctx_id);
+                                rc = -ENOMEM;
+                                goto end;
+                        }
                         spin_lock(&ctx->lock);
                         list_for_each_entry(req, &ctx->active_req_list, list) {
                                 if (req->request_id != cmd->req_id)
@@ -1027,9 +1067,14 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
                         }
                 }
         }
+
+        rc = 0;
         CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);
 
-        return 0;
+end:
+        kfree(flush_args.flush_req_active);
+        kfree(flush_args.flush_req_pending);
+        return rc;
 }
 
 int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
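
For reference, the pattern this patch applies in both flush paths is: splice the context's request list onto a local temp_list under the spinlock, count the entries with list_for_each(), kcalloc() an array of request pointers sized to that count, fill it, and kfree() it on a shared end: exit path. The sketch below is a minimal, self-contained illustration of that allocation pattern under stated assumptions, not the driver code itself: it assumes a kernel build with <linux/list.h>, <linux/slab.h>, and <linux/types.h>, and "struct sample_req" is a hypothetical stand-in for struct cam_ctx_request.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Simplified stand-in for struct cam_ctx_request. */
struct sample_req {
        struct list_head list;
        void *req_priv;
};

/*
 * Collect the req_priv handle of every request on temp_list into a
 * dynamically sized array, mirroring how the patch builds
 * flush_args.flush_req_pending / flush_req_active.
 */
static int sample_flush_list(struct list_head *temp_list)
{
        struct list_head *pos;
        struct sample_req *req;
        void **flush_array = NULL;
        u32 num_entries = 0, idx = 0;
        int rc = 0;

        /* First pass: size the array from the list length. */
        list_for_each(pos, temp_list)
                num_entries++;

        if (num_entries) {
                flush_array = kcalloc(num_entries, sizeof(void *), GFP_KERNEL);
                if (!flush_array) {
                        rc = -ENOMEM;
                        goto end;
                }
        }

        /* Second pass: record each request's private handle. */
        list_for_each_entry(req, temp_list, list)
                flush_array[idx++] = req->req_priv;

        /* ... hand flush_array / num_entries to the HW manager here ... */

end:
        kfree(flush_array);     /* kfree(NULL) is a no-op */
        return rc;
}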