@@ -790,6 +790,70 @@ static void __cam_isp_ctx_handle_buf_done_fail_log(
 	}
 }
 
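+/*
+ * Called once every fence of @req has been acked: re-queue the request
+ * to the pending list for bubble recovery, or move it to the free list
+ * (signalling its fences as error if the request was already flushed).
+ */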
+static int __cam_isp_ctx_handle_buf_done_for_req_list(
+	struct cam_isp_context *ctx_isp,
+	struct cam_ctx_request *req)
+{
+	int rc = 0, i;
+	uint64_t buf_done_req_id;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	ctx_isp->active_req_cnt--;
+	buf_done_req_id = req->request_id;
+
+	if (req_isp->bubble_detected && req_isp->bubble_report) {
+		req_isp->num_acked = 0;
+		req_isp->bubble_detected = false;
+		list_del_init(&req->list);
+		atomic_set(&ctx_isp->process_bubble, 0);
+
+		if (buf_done_req_id <= ctx->last_flush_req) {
+			for (i = 0; i < req_isp->num_fence_map_out; i++)
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+
+			list_add_tail(&req->list, &ctx->free_req_list);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
+				buf_done_req_id, ctx_isp->active_req_cnt,
+				ctx->ctx_id);
+		} else {
+			list_add(&req->list, &ctx->pending_req_list);
+			CAM_DBG(CAM_REQ,
+				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
+				req->request_id, ctx_isp->active_req_cnt,
+				ctx->ctx_id);
+		}
+	} else {
+		if (!ctx_isp->use_frame_header_ts) {
+			if (ctx_isp->reported_req_id < buf_done_req_id) {
+				ctx_isp->reported_req_id = buf_done_req_id;
+				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
+					buf_done_req_id,
+					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+			}
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		req_isp->reapply = false;
+
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
+			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
+		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
+	}
+
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
+
+	__cam_isp_ctx_update_event_record(ctx_isp,
+		CAM_ISP_CTX_EVENT_BUFDONE, req);
+	return rc;
+}
+
 static int __cam_isp_ctx_handle_buf_done_for_request(
 	struct cam_isp_context *ctx_isp,
 	struct cam_ctx_request *req,
@@ -801,7 +865,6 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 	int i, j;
 	struct cam_isp_ctx_req *req_isp;
 	struct cam_context *ctx = ctx_isp->base;
-	uint64_t buf_done_req_id;
 	const char *handle_type;
 
 	trace_cam_buf_done("ISP", ctx, req);
@@ -811,10 +874,8 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
 		bubble_state, req_isp->bubble_detected);
 
-	if (done_next_req) {
-		done_next_req->num_handles = 0;
-		done_next_req->timestamp = done->timestamp;
-	}
+	done_next_req->num_handles = 0;
+	done_next_req->timestamp = done->timestamp;
 
 	for (i = 0; i < done->num_handles; i++) {
 		for (j = 0; j < req_isp->num_fence_map_out; j++) {
@@ -824,28 +885,21 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 		}
 
 		if (j == req_isp->num_fence_map_out) {
-			if (done_next_req) {
-				/*
-				 * If not found in current request, it could be
-				 * belonging to next request, This can happen if
-				 * IRQ delay happens.
-				 */
-				CAM_WARN(CAM_ISP,
-					"BUF_DONE for res 0x%x not found in Req %lld ",
-					__cam_isp_resource_handle_id_to_type(
-					done->resource_handle[i]),
-					req->request_id);
-
+			/*
+			 * If not found in the current request, it could
+			 * belong to the next request; this can happen if
+			 * IRQ delay happens. It is only valid when the
+			 * platform doesn't have last consumed address.
+			 */
+			CAM_WARN(CAM_ISP,
+				"BUF_DONE for res %s not found in Req %lld ",
+				__cam_isp_resource_handle_id_to_type(
+				done->resource_handle[i]),
+				req->request_id);
+
-				done_next_req->resource_handle
-					[done_next_req->num_handles++] =
-					done->resource_handle[i];
-			} else {
-				CAM_ERR(CAM_ISP,
-					"Can not find matching lane handle 0x%x! in Req %lld",
-					done->resource_handle[i],
-					req->request_id);
-				rc = -EINVAL;
-			}
+			done_next_req->resource_handle
+				[done_next_req->num_handles++] =
+				done->resource_handle[i];
 			continue;
 		}
 
@@ -866,12 +920,9 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 			trace_cam_log_event("Duplicate BufDone",
 				handle_type, req->request_id, ctx->ctx_id);
 
-			if (done_next_req) {
-				done_next_req->resource_handle
-					[done_next_req->num_handles++] =
-					done->resource_handle[i];
-			}
-
+			done_next_req->resource_handle
+				[done_next_req->num_handles++] =
+				done->resource_handle[i];
 			continue;
 		}
 
@@ -944,68 +995,156 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 	if (req_isp->num_acked != req_isp->num_fence_map_out)
 		return rc;
 
-	ctx_isp->active_req_cnt--;
-	buf_done_req_id = req->request_id;
+	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
+	return rc;
+}
 
-	if (req_isp->bubble_detected && req_isp->bubble_report) {
-		req_isp->num_acked = 0;
-		req_isp->bubble_detected = false;
-		list_del_init(&req->list);
-		atomic_set(&ctx_isp->process_bubble, 0);
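+/*
+ * Same job as __cam_isp_ctx_handle_buf_done_for_request, except that,
+ * when @verify_consumed_addr is true, a reported resource handle only
+ * matches a fence map entry whose programmed buffer address equals the
+ * last consumed address reported by the HW.
+ */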
+static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
+	struct cam_isp_context *ctx_isp,
+	struct cam_ctx_request *req,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state,
+	bool verify_consumed_addr)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+	const char *handle_type;
 
-		if (buf_done_req_id <= ctx->last_flush_req) {
-			for (i = 0; i < req_isp->num_fence_map_out; i++)
-				cam_sync_signal(
-					req_isp->fence_map_out[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
+	trace_cam_buf_done("ISP", ctx, req);
 
-			list_add_tail(&req->list, &ctx->free_req_list);
-			CAM_DBG(CAM_REQ,
-				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
-				buf_done_req_id, ctx_isp->active_req_cnt,
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
+		bubble_state, req_isp->bubble_detected);
+
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
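+			/*
+			 * A resource handle match alone is not trusted
+			 * when consumed-address checking is enabled: the
+			 * address the HW last wrote must also match this
+			 * fence map entry.
+			 */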
+			if (verify_consumed_addr &&
+				(done->last_consumed_addr[i] !=
+				req_isp->fence_map_out[j].image_buf_addr[0]))
+				continue;
+
+			if (done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle)
+				break;
+		}
+
+		if (j == req_isp->num_fence_map_out) {
+			/*
+			 * If not found in the current request, it could
+			 * belong to the next request; this can happen if
+			 * IRQ delay happens. It is only valid when the
+			 * platform doesn't have last consumed address.
+			 */
+			CAM_DBG(CAM_ISP,
+				"BUF_DONE for res %s not found in Req %lld ",
+				__cam_isp_resource_handle_id_to_type(
+				done->resource_handle[i]),
+				req->request_id);
+			continue;
+		}
+
+		if (req_isp->fence_map_out[j].sync_id == -1) {
+			if (ctx_isp->isp_device_type == CAM_IFE_DEVICE_TYPE)
+				handle_type =
+				__cam_isp_resource_handle_id_to_type(
+				req_isp->fence_map_out[j].resource_handle);
+			else
+				handle_type =
+				__cam_isp_tfe_resource_handle_id_to_type(
+				req_isp->fence_map_out[j].resource_handle);
+
+			CAM_WARN(CAM_ISP,
+				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
+				req->request_id, i, j, handle_type);
+
+			trace_cam_log_event("Duplicate BufDone",
+				handle_type, req->request_id, ctx->ctx_id);
+			continue;
+		}
+
+		if (!req_isp->bubble_detected) {
+			CAM_DBG(CAM_ISP,
+				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
+				req->request_id,
+				req_isp->fence_map_out[j].resource_handle,
+				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id);
-		} else {
-			list_add(&req->list, &ctx->pending_req_list);
-			CAM_DBG(CAM_REQ,
-				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
-				req->request_id, ctx_isp->active_req_cnt,
+
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (rc)
+				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
+					rc);
+		} else if (!req_isp->bubble_report) {
+			CAM_ERR(CAM_ISP,
+				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
+				req->request_id,
+				req_isp->fence_map_out[j].resource_handle,
+				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id);
+
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+					rc);
+		} else {
+			/*
+			 * Ignore the buffer done if bubble detect is on.
+			 * Increment the ack number here, and queue the
+			 * request back to pending list whenever all the
+			 * buffers are done.
+			 */
+			req_isp->num_acked++;
+			CAM_DBG(CAM_ISP,
+				"buf done with bubble state %d recovery %d",
+				bubble_state, req_isp->bubble_report);
+			continue;
 		}
-	} else {
 
-		if (!ctx_isp->use_frame_header_ts) {
-			if (ctx_isp->reported_req_id < buf_done_req_id) {
-				ctx_isp->reported_req_id = buf_done_req_id;
-				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
-					buf_done_req_id,
-					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
-			}
+		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
+			req->request_id,
+			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
+		if (!rc) {
+			req_isp->num_acked++;
+			req_isp->fence_map_out[j].sync_id = -1;
 		}
-		list_del_init(&req->list);
-		list_add_tail(&req->list, &ctx->free_req_list);
-		req_isp->reapply = false;
 
-		CAM_DBG(CAM_REQ,
-			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
-			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
-		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
+		if ((ctx_isp->use_frame_header_ts) &&
+			(req_isp->hw_update_data.frame_header_res_id ==
+			req_isp->fence_map_out[j].resource_handle))
+			__cam_isp_ctx_send_sof_timestamp_frame_header(
+				ctx_isp,
+				req_isp->hw_update_data.frame_header_cpu_addr,
+				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	}
 
-	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
-		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
+	if (req_isp->num_acked > req_isp->num_fence_map_out) {
+		/* Should not happen */
+		CAM_ERR(CAM_ISP,
+			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
+			req->request_id, req_isp->num_acked,
+			req_isp->num_fence_map_out, ctx->ctx_id);
+		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
+	}
 
-	__cam_isp_ctx_update_event_record(ctx_isp,
-		CAM_ISP_CTX_EVENT_BUFDONE, req);
+	if (req_isp->num_acked != req_isp->num_fence_map_out)
+		return rc;
+
+	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
 	return rc;
 }
 
-static int __cam_isp_ctx_handle_buf_done_in_activated_state(
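+/*
+ * Legacy buf done handling for targets that do not report the last
+ * consumed address: handles that don't match the current request are
+ * deferred to the next request via done_next_req.
+ */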
+static int __cam_isp_ctx_handle_buf_done(
 	struct cam_isp_context *ctx_isp,
 	struct cam_isp_hw_done_event_data *done,
 	uint32_t bubble_state)
 {
 	int rc = 0;
-	struct cam_ctx_request *req;
+	struct cam_ctx_request *req;
 	struct cam_context *ctx = ctx_isp->base;
 	struct cam_isp_hw_done_event_data done_next_req;
 
@@ -1063,6 +1202,111 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
 	return rc;
 }
 
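+/*
+ * Check whether any handle in @done also matches a fence map entry of
+ * @req by both resource handle and last consumed address; a match means
+ * the event carries buffers for @req (the next active request), i.e. an
+ * IRQ delay occurred.
+ */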
+static void __cam_isp_ctx_buf_done_match_req(
+	struct cam_ctx_request *req,
+	struct cam_isp_hw_done_event_data *done,
+	bool *irq_delay_detected)
+{
+	int i, j;
+	uint32_t match_count = 0;
+	struct cam_isp_ctx_req *req_isp;
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
+			if ((done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle) &&
+				(done->last_consumed_addr[i] ==
+				req_isp->fence_map_out[j].image_buf_addr[0])) {
+				match_count++;
+				break;
+			}
+		}
+	}
+
+	if (match_count > 0)
+		*irq_delay_detected = true;
+	else
+		*irq_delay_detected = false;
+
+	CAM_DBG(CAM_ISP,
+		"buf done num handles %d match count %d for next req:%lld",
+		done->num_handles, match_count, req->request_id);
+}
+
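+/*
+ * Buf done handling for targets that report the last consumed address.
+ * The event is applied to the oldest active request; when some handles
+ * match the newest active request instead (IRQ delay), that request is
+ * processed too, with its consumed addresses strictly verified.
+ */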
+static int __cam_isp_ctx_handle_buf_done_verify_addr(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+	bool irq_delay_detected = false;
+	struct cam_ctx_request *req;
+	struct cam_ctx_request *next_req = NULL;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_DBG(CAM_ISP, "Buf done with no active request");
+		return 0;
+	}
+
+	req = list_first_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+
+	if (ctx_isp->active_req_cnt > 1) {
+		next_req = list_last_entry(
+			&ctx->active_req_list,
+			struct cam_ctx_request, list);
+
+		if (next_req->request_id != req->request_id)
+			__cam_isp_ctx_buf_done_match_req(next_req, done,
+				&irq_delay_detected);
+		else
+			CAM_WARN(CAM_ISP,
+				"Req %lld only active request, spurious buf_done rxd",
+				req->request_id);
+	}
+
+	/*
+	 * If no IRQ delay is detected, verify the consumed address for
+	 * the current req; when a delay is detected the reported
+	 * address may belong to the next req, so the current req can't
+	 * be verified against it.
+	 */
+	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
+		ctx_isp, req, done, bubble_state,
+		!irq_delay_detected);
+
+	/*
+	 * Always verify the consumed address for the next req, since
+	 * the reported buf done event may actually belong to the
+	 * current req; in that case it must not be signalled for the
+	 * next req.
+	 */
+	if (!rc && irq_delay_detected)
+		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
+			ctx_isp, next_req, done,
+			bubble_state, true);
+
+	return rc;
+}
+
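+/*
+ * Entry point for buf done in the activated state: use the
+ * consumed-address aware path when the acquired HW supports it,
+ * otherwise fall back to the legacy path.
+ */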
+static int __cam_isp_ctx_handle_buf_done_in_activated_state(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+
+	if (ctx_isp->support_consumed_addr)
+		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
+			ctx_isp, done, bubble_state);
+	else
+		rc = __cam_isp_ctx_handle_buf_done(
+			ctx_isp, done, bubble_state);
+
+	return rc;
+}
+
 static int __cam_isp_ctx_apply_req_offline(
 	void *priv, void *data)
 {
@@ -3663,6 +3907,7 @@ static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
 	ctx_isp->reported_req_id = 0;
 	ctx_isp->hw_acquired = false;
 	ctx_isp->init_received = false;
+	ctx_isp->support_consumed_addr = false;
 	ctx_isp->req_info.last_bufdone_req_id = 0;
 
 	atomic64_set(&ctx_isp->state_monitor_head, -1);
@@ -4340,6 +4585,8 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 	 */
 	ctx_isp->custom_enabled = param.custom_enabled;
 	ctx_isp->use_frame_header_ts = param.use_frame_header_ts;
+	ctx_isp->support_consumed_addr =
+		param.support_consumed_addr;
 
 	/* Query the context has rdi only resource */
 	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;