@@ -5848,11 +5848,6 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
 
-	CAM_DBG(CAM_ISP, "Flush pending list, ctx_idx: %u, link: 0x%x", ctx->ctx_id, ctx->link_hdl);
-	spin_lock_bh(&ctx->lock);
-	__cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
-	spin_unlock_bh(&ctx->lock);
-
 	/* Reset skipped_list for FCG config */
 	__cam_isp_ctx_reset_fcg_tracker(ctx);
 
@@ -5918,6 +5913,17 @@ static int __cam_isp_ctx_flush_req_in_top_state(
 		ctx_isp->init_received = false;
 	}
 
+	CAM_DBG(CAM_ISP, "Flush pending list, ctx_idx: %u, link: 0x%x", ctx->ctx_id, ctx->link_hdl);
+	/*
+	 * On occasions when we are doing a flush all, HW would get reset,
+	 * shutting down any th/bh in the pipeline. If internal recovery
+	 * is triggered prior to the flush, clearing the pending list post
+	 * HW reset ensures no stale request entities are left behind.
+	 */
+	spin_lock_bh(&ctx->lock);
+	__cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	spin_unlock_bh(&ctx->lock);
+
 end:
 	ctx_isp->bubble_frame_cnt = 0;
 	ctx_isp->congestion_cnt = 0;
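
The block comment above the relocated flush motivates the new ordering: reset the HW first so no threaded/bottom-half path can requeue work, then drain the pending list under the context lock. Below is a minimal, standalone user-space sketch of that ordering, not the driver code itself; the pthread mutex stands in for ctx->lock (spin_lock_bh), and the fake_ctx, fake_hw_reset(), fake_recovery_requeue() and fake_flush_pending() names are illustrative assumptions only.

/*
 * Sketch: flush the pending list only after the simulated HW reset,
 * so a racing "internal recovery" requeue cannot leave stale entries.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	int id;
	struct fake_req *next;
};

struct fake_ctx {
	pthread_mutex_t lock;        /* stands in for ctx->lock */
	struct fake_req *pending;    /* stands in for ctx->pending_req_list */
	bool hw_running;             /* cleared by the simulated HW reset */
};

/* Simulated internal-recovery path: may requeue a request while HW runs. */
static void fake_recovery_requeue(struct fake_ctx *ctx, struct fake_req *req)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->hw_running) {       /* once HW is reset, nothing is requeued */
		req->next = ctx->pending;
		ctx->pending = req;
	}
	pthread_mutex_unlock(&ctx->lock);
}

/* Simulated HW reset: after this, no th/bh-style callbacks can fire. */
static void fake_hw_reset(struct fake_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->hw_running = false;
	pthread_mutex_unlock(&ctx->lock);
}

/* Drain everything still sitting on the pending list. */
static void fake_flush_pending(struct fake_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->pending) {
		struct fake_req *req = ctx->pending;

		ctx->pending = req->next;
		printf("flushed req %d\n", req->id);
	}
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct fake_ctx ctx = { .pending = NULL, .hw_running = true };
	struct fake_req r1 = { .id = 1, .next = NULL };

	pthread_mutex_init(&ctx.lock, NULL);

	/* Recovery requeues a request just before the flush-all arrives. */
	fake_recovery_requeue(&ctx, &r1);

	/* Ordering established by the patch: reset HW first, flush last. */
	fake_hw_reset(&ctx);
	fake_flush_pending(&ctx);    /* r1 is drained; nothing stale remains */

	pthread_mutex_destroy(&ctx.lock);
	return 0;
}

Flushing before the reset, as the old code did, would leave a window where recovery could repopulate the pending list after it had already been emptied.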