msm: camera: isp: Handle buf_done without request in active_list

When a buf_done arrives without any request in the active_list, it is
possible that the request was applied just before SOF/RUP but the
context state has not yet moved to Applied; at that point the request
is still in the wait_list, so the RUP event does not move it into the
active_list. In addition, if the BUF_DONE on a port arrives before
EPOCH, we simply drop that BUF_DONE and the request is eventually
tagged as BUBBLE at EPOCH time. But since the BUF_DONE was dropped,
the request never comes out of BUBBLE. To handle such cases and to
recover from BUBBLE, check whether the BUF_DONE matches the request
in the wait_list by comparing the last_consumed address; if it does,
mark it as a deferred buf_done and handle it once the request has
moved to the active_list.

CRs-Fixed: 2814346
Change-Id: I778a6a684076d5327d536f319a23206ccc0c25b6
Signed-off-by: Pavan Kumar Chilamkurthi <pchilamk@codeaurora.org>
commit 4c4ed69a5c (parent 2dc8859361)
Author: Pavan Kumar Chilamkurthi
Date:   2020-11-05 14:59:03 -08:00

2 changed files with 164 additions and 8 deletions
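
For readers skimming the diff, the core of the change reduces to the sketch
below: if a buf_done lands while the request is still in the wait_list, park
the ack instead of signalling the fence, then flush the parked acks once the
request reaches the active_list (or while handling the bubble). This is a
standalone, compilable illustration only; struct isp_req and the helper
functions are hypothetical stand-ins for the cam_isp_ctx_req fields and
cam_sync_signal() calls that appear in the diff.

/*
 * Minimal sketch of the deferred buf_done flow described in the commit
 * message. Lists, locking, and the real fence plumbing are elided.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RES_MAX 8

struct isp_req {
    uint64_t request_id;
    bool     in_active_list;        /* false: still in wait_list */
    uint32_t num_fence_map_out;     /* fences expected for this request */
    uint32_t num_acked;             /* fences already signalled */
    uint32_t num_deferred_acks;     /* buf_dones parked for later */
    uint32_t deferred_fence_map_index[RES_MAX];
};

/* Stand-in for cam_sync_signal(): just report the signal. */
static void signal_fence(struct isp_req *req, uint32_t idx)
{
    printf("req %llu: signal fence %u\n",
           (unsigned long long)req->request_id, idx);
    req->num_acked++;
}

/*
 * buf_done handler: if the request is still in the wait_list (active
 * list was empty but the last_consumed address matched), defer the ack
 * instead of signalling the fence now.
 */
static void handle_buf_done(struct isp_req *req, uint32_t fence_idx)
{
    if (!req->in_active_list) {
        req->deferred_fence_map_index[req->num_deferred_acks++] = fence_idx;
        printf("req %llu: deferred buf_done for fence %u\n",
               (unsigned long long)req->request_id, fence_idx);
        return;
    }
    signal_fence(req, fence_idx);
}

/* Once the request reaches the active_list, flush any parked acks. */
static void move_to_active_and_flush(struct isp_req *req)
{
    uint32_t i;

    req->in_active_list = true;
    for (i = 0; i < req->num_deferred_acks; i++)
        signal_fence(req, req->deferred_fence_map_index[i]);
    req->num_deferred_acks = 0;
}

int main(void)
{
    struct isp_req req = { .request_id = 42, .num_fence_map_out = 2 };

    handle_buf_done(&req, 0);       /* arrives before EPOCH: parked   */
    move_to_active_and_flush(&req); /* req moves to active_list       */
    handle_buf_done(&req, 1);       /* normal path: signalled at once */

    if (req.num_acked == req.num_fence_map_out)
        printf("req 42: all fences signalled\n");
    return 0;
}

In the driver, the "flush" role is played by __cam_isp_handle_deferred_buf_done(),
invoked from the success, failure, and bubble paths in the diff below.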

@@ -819,6 +819,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
     if (req_isp->bubble_detected && req_isp->bubble_report) {
         req_isp->num_acked = 0;
+        req_isp->num_deferred_acks = 0;
         req_isp->bubble_detected = false;
         list_del_init(&req->list);
         atomic_set(&ctx_isp->process_bubble, 0);
@@ -1019,12 +1020,75 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
     return rc;
 }
 
+static int __cam_isp_handle_deferred_buf_done(
+    struct cam_isp_context *ctx_isp,
+    struct cam_ctx_request *req,
+    bool bubble_handling,
+    uint32_t status, uint32_t event_cause)
+{
+    int i, j;
+    int rc = 0;
+    struct cam_isp_ctx_req *req_isp =
+        (struct cam_isp_ctx_req *) req->req_priv;
+    struct cam_context *ctx = ctx_isp->base;
+
+    CAM_DBG(CAM_ISP,
+        "ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
+        ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
+        req_isp->num_acked, bubble_handling);
+
+    for (i = 0; i < req_isp->num_deferred_acks; i++) {
+        j = req_isp->deferred_fence_map_index[i];
+
+        CAM_DBG(CAM_ISP,
+            "ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
+            ctx->ctx_id, status, event_cause,
+            req->request_id,
+            req_isp->fence_map_out[j].resource_handle,
+            req_isp->fence_map_out[j].sync_id);
+
+        if (req_isp->fence_map_out[j].sync_id == -1) {
+            CAM_WARN(CAM_ISP,
+                "ctx[%d] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
+                ctx->ctx_id, req->request_id, j,
+                req_isp->fence_map_out[j].resource_handle);
+            continue;
+        }
+
+        if (!bubble_handling) {
+            CAM_WARN(CAM_ISP,
+                "ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
+                ctx->ctx_id, req->request_id, status,
+                req_isp->fence_map_out[j].resource_handle);
+
+            rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+                status, event_cause);
+            if (rc) {
+                CAM_ERR(CAM_ISP,
+                    "ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
+                    ctx->ctx_id, req->request_id,
+                    req_isp->fence_map_out[j].sync_id,
+                    status, rc);
+            } else {
+                req_isp->num_acked++;
+                req_isp->fence_map_out[j].sync_id = -1;
+            }
+        } else {
+            req_isp->num_acked++;
+        }
+    }
+
+    req_isp->num_deferred_acks = 0;
+
+    return rc;
+}
+
 static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
     struct cam_isp_context *ctx_isp,
     struct cam_ctx_request *req,
     struct cam_isp_hw_done_event_data *done,
     uint32_t bubble_state,
-    bool verify_consumed_addr)
+    bool verify_consumed_addr,
+    bool defer_buf_done)
 {
     int rc = 0;
     int i, j;
@@ -1085,7 +1149,32 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
             continue;
         }
 
-        if (!req_isp->bubble_detected) {
+        if (defer_buf_done) {
+            uint32_t deferred_indx = req_isp->num_deferred_acks;
+
+            /*
+             * If we are handling this BUF_DONE event for a request
+             * that is still in wait_list, do not signal now,
+             * instead mark it as done and handle it later -
+             * if this request is going into BUBBLE state later
+             * it will automatically be re-applied. If this is not
+             * going into BUBBLE, signal fences later.
+             * Note - we will come here only if the last consumed
+             * address matches with this port's buffer.
+             */
+            req_isp->deferred_fence_map_index[deferred_indx] = j;
+            req_isp->num_deferred_acks++;
+            CAM_WARN(CAM_ISP,
+                "ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
+                ctx->ctx_id, req->request_id, bubble_state,
+                req_isp->bubble_report);
+            CAM_WARN(CAM_ISP,
+                "ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
+                ctx->ctx_id, req_isp->num_deferred_acks, j,
+                req_isp->fence_map_out[j].resource_handle,
+                req_isp->fence_map_out[j].sync_id);
+            continue;
+        } else if (!req_isp->bubble_detected) {
             CAM_DBG(CAM_ISP,
                 "Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
                 req->request_id,
@@ -1096,9 +1185,16 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
             rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                 CAM_SYNC_STATE_SIGNALED_SUCCESS,
                 CAM_SYNC_COMMON_EVENT_SUCCESS);
-            if (rc)
+            if (rc) {
                 CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
                     rc);
+            } else if (req_isp->num_deferred_acks) {
+                /* Process deferred buf_done acks */
+                __cam_isp_handle_deferred_buf_done(ctx_isp,
+                    req, false,
+                    CAM_SYNC_STATE_SIGNALED_SUCCESS,
+                    CAM_SYNC_COMMON_EVENT_SUCCESS);
+            }
         } else if (!req_isp->bubble_report) {
             CAM_DBG(CAM_ISP,
                 "Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
@@ -1110,9 +1206,16 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
             rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
                 CAM_SYNC_STATE_SIGNALED_ERROR,
                 CAM_SYNC_ISP_EVENT_BUBBLE);
-            if (rc)
+            if (rc) {
                 CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
                     rc);
+            } else if (req_isp->num_deferred_acks) {
+                /* Process deferred buf_done acks */
+                __cam_isp_handle_deferred_buf_done(ctx_isp, req,
+                    false,
+                    CAM_SYNC_STATE_SIGNALED_ERROR,
+                    CAM_SYNC_ISP_EVENT_BUBBLE);
+            }
         } else {
             /*
              * Ignore the buffer done if bubble detect is on
@@ -1124,6 +1227,14 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
             CAM_DBG(CAM_ISP,
                 "buf done with bubble state %d recovery %d",
                 bubble_state, req_isp->bubble_report);
+
+            /* Process deferred buf_done acks */
+            if (req_isp->num_deferred_acks)
+                __cam_isp_handle_deferred_buf_done(ctx_isp, req,
+                    true,
+                    CAM_SYNC_STATE_SIGNALED_ERROR,
+                    CAM_SYNC_ISP_EVENT_BUBBLE);
+
             continue;
         }
@@ -1171,7 +1282,7 @@ static int __cam_isp_ctx_handle_buf_done(
     struct cam_isp_hw_done_event_data done_next_req;
 
     if (list_empty(&ctx->active_req_list)) {
-        CAM_DBG(CAM_ISP, "Buf done with no active request");
+        CAM_WARN(CAM_ISP, "Buf done with no active request");
         return 0;
     }
@@ -1271,7 +1382,31 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
     struct cam_context *ctx = ctx_isp->base;
 
     if (list_empty(&ctx->active_req_list)) {
-        CAM_DBG(CAM_ISP, "Buf done with no active request");
+        CAM_WARN(CAM_ISP,
+            "Buf done with no active request bubble_state=%d",
+            bubble_state);
+
+        if (!list_empty(&ctx->wait_req_list)) {
+            struct cam_isp_ctx_req *req_isp;
+
+            req = list_first_entry(&ctx->wait_req_list,
+                struct cam_ctx_request, list);
+            CAM_WARN(CAM_ISP,
+                "Buf done with no active request but with req in wait list, req %llu",
+                req->request_id);
+            req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+            /*
+             * Verify consumed address for this request to make sure
+             * we are handling the buf_done for the correct
+             * buffer. Also defer actual buf_done handling, i.e.
+             * do not signal the fence as this request may go into
+             * Bubble state eventually.
+             */
+            rc =
+            __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
+                ctx_isp, req, done, bubble_state, true, true);
+        }
+
         return 0;
     }
@@ -1299,7 +1434,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
      */
     rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
         ctx_isp, req, done, bubble_state,
-        !irq_delay_detected);
+        !irq_delay_detected, false);
 
     /*
      * Verify the consumed address for next req all the time,
@@ -1309,7 +1444,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
     if (!rc && irq_delay_detected)
         rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
             ctx_isp, next_req, done,
-            bubble_state, true);
+            bubble_state, true, false);
 
     return rc;
 }
@@ -1392,6 +1527,7 @@ static int __cam_isp_ctx_apply_req_offline(
     ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
     prev_applied_req = ctx_isp->last_applied_req_id;
     ctx_isp->last_applied_req_id = req->request_id;
+    atomic_set(&ctx_isp->apply_in_progress, 1);
 
     list_del_init(&req->list);
     list_add_tail(&req->list, &ctx->wait_req_list);
@@ -1405,12 +1541,14 @@ static int __cam_isp_ctx_apply_req_offline(
         ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
         ctx_isp->last_applied_req_id = prev_applied_req;
+        atomic_set(&ctx_isp->apply_in_progress, 0);
 
         list_del_init(&req->list);
         list_add(&req->list, &ctx->pending_req_list);
         spin_unlock_bh(&ctx->lock);
     } else {
+        atomic_set(&ctx_isp->apply_in_progress, 0);
         CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
             CAM_ISP_CTX_ACTIVATED_APPLIED,
             ctx_isp->last_applied_req_id);
@@ -1975,6 +2113,9 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
         return -EINVAL;
     }
 
+    if (atomic_read(&ctx_isp->apply_in_progress))
+        CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");
+
     ctx_isp->frame_id++;
     ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
     ctx_isp->boot_timestamp = sof_event_data->boot_time;
@@ -3000,6 +3141,8 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
     cfg.reapply = req_isp->reapply;
     cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;
 
+    atomic_set(&ctx_isp->apply_in_progress, 1);
+
     rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
     if (!rc) {
         spin_lock_bh(&ctx->lock);
@@ -3034,6 +3177,7 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
             "ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d",
             ctx->ctx_id, apply->request_id, rc);
     }
+    atomic_set(&ctx_isp->apply_in_progress, 0);
 end:
     return rc;
 }
@@ -4349,6 +4493,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
     req_isp->num_fence_map_out = cfg.num_out_map_entries;
     req_isp->num_fence_map_in = cfg.num_in_map_entries;
     req_isp->num_acked = 0;
+    req_isp->num_deferred_acks = 0;
     req_isp->bubble_detected = false;
     req_isp->cdm_reset_before_apply = false;
     req_isp->hw_update_data.packet = packet;

@@ -148,6 +148,13 @@ struct cam_isp_ctx_irq_ops {
  * @num_acked:             Count to track acked entries for output.
  *                         If count equals the number of fence out, it means
  *                         the request has been completed.
+ * @num_deferred_acks:     Number of buf_dones/acks that are deferred for
+ *                         handling or signalled in special scenarios.
+ *                         Increment this count instead of num_acked and
+ *                         handle these events later, eventually
+ *                         incrementing num_acked.
+ * @deferred_fence_map_index: Saves the indices of fence_map_out for which
+ *                         handling of buf_done is deferred.
  * @bubble_report:         Flag to track if bubble report is active on
  *                         current request
  * @hw_update_data:        HW update data for this request
@@ -167,6 +174,8 @@ struct cam_isp_ctx_req {
     struct cam_hw_fence_map_entry fence_map_in[CAM_ISP_CTX_RES_MAX];
     uint32_t                      num_fence_map_in;
     uint32_t                      num_acked;
+    uint32_t                      num_deferred_acks;
+    uint32_t                      deferred_fence_map_index[CAM_ISP_CTX_RES_MAX];
     int32_t                       bubble_report;
     struct cam_isp_prepare_hw_update_data hw_update_data;
     ktime_t                       event_timestamp
@@ -261,6 +270,7 @@ struct cam_isp_context_event_record {
  * @custom_enabled:          Custom HW enabled for this ctx
  * @use_frame_header_ts:     Use frame header for qtimer ts
  * @support_consumed_addr:   Indicate whether HW has last consumed addr reg
+ * @apply_in_progress:       Whether request apply is in progress
  * @init_timestamp:          Timestamp at which this context is initialized
  * @isp_device_type:         ISP device type
  * @rxd_epoch:               Indicate whether epoch has been received. Used to
@@ -307,6 +317,7 @@ struct cam_isp_context {
     bool                                  custom_enabled;
     bool                                  use_frame_header_ts;
     bool                                  support_consumed_addr;
+    atomic_t                              apply_in_progress;
     unsigned int                          init_timestamp;
     uint32_t                              isp_device_type;
     atomic_t                              rxd_epoch;
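
Separately, the diff introduces an apply_in_progress flag: the apply paths set
it around the hw_config() call and clear it afterwards, and the SOF handler
reads it to log when a request apply races with SOF. Below is a minimal sketch
of that pattern using C11 atomics as a stand-in for the kernel's atomic_t; the
function names here are illustrative, not the driver's.

/*
 * Sketch of the apply_in_progress pattern: set the flag around the HW
 * config step, read it from the SOF path to detect the overlap.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int apply_in_progress;

static void apply_req(void)
{
    atomic_store(&apply_in_progress, 1);
    /* ... hw_config() would run here ... */
    atomic_store(&apply_in_progress, 0);
}

static void sof_handler(void)
{
    if (atomic_load(&apply_in_progress))
        printf("Apply is in progress at the time of SOF\n");
}

int main(void)
{
    /* Simulate the race: SOF fires while an apply is mid-flight. */
    atomic_store(&apply_in_progress, 1);
    sof_handler();                  /* logs the overlap */
    atomic_store(&apply_in_progress, 0);

    apply_req();
    sof_handler();                  /* quiet: no apply in flight */
    return 0;
}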