
Merge "msm: camera: common: Add camera minidump changes" into camera-kernel.lnx.5.0

Savita Patted 3 years ago
commit 09f1d471b3

+ 4 - 1
config/waipio.mk

@@ -15,4 +15,7 @@ ccflags-y += -DCONFIG_SPECTRA_CUSTOM=1
 ccflags-y += -DCONFIG_SPECTRA_SENSOR=1
 
 # External Dependencies
-KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1
+KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM=1
+ifeq ($(CONFIG_QCOM_VA_MINIDUMP), y)
+KBUILD_CPPFLAGS += -DCONFIG_QCOM_VA_MINIDUMP=1
+endif
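
This guard mirrors the kernel's CONFIG_QCOM_VA_MINIDUMP switch into a preprocessor define, so camera sources can compile VA-minidump-only paths conditionally. A minimal hypothetical use of the define (illustration only; the sources below register their callbacks unconditionally):

	#ifdef CONFIG_QCOM_VA_MINIDUMP
		/* VA-minidump-only path, e.g. registering dump callbacks */
	#endif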

+ 19 - 0
drivers/cam_core/cam_hw_mgr_intf.h

@@ -57,6 +57,10 @@ typedef int (*cam_ctx_info_dump_cb_func)(void *context,
 typedef int (*cam_ctx_recovery_cb_func)(void *context,
 	void *recovery_data);
 
+/* ctx mini dump callback function type */
+typedef int (*cam_ctx_mini_dump_cb_func)(void *context,
+	void *args);
+
 /**
  * struct cam_hw_update_entry - Entry for hardware config
  *
@@ -138,6 +142,7 @@ struct cam_hw_acquire_stream_caps {
  *                         its updated per hardware
  * @valid_acquired_hw:     Valid num of acquired hardware
  * @op_params:             OP Params from hw_mgr to ctx
+ * @mini_dump_cb:          Mini dump callback function
  *
  */
 struct cam_hw_acquire_args {
@@ -156,6 +161,7 @@ struct cam_hw_acquire_args {
 	uint32_t    valid_acquired_hw;
 
 	struct cam_hw_acquire_stream_caps op_params;
+	cam_ctx_mini_dump_cb_func    mini_dump_cb;
 };
 
 /**
@@ -396,6 +402,19 @@ struct cam_hw_cmd_args {
 	} u;
 };
 
+/**
+ * struct cam_hw_mini_dump_args - Mini Dump arguments
+ *
+ * @start_addr:          Start address of the buffer
+ * @len:                 Length of the buffer
+ * @bytes_written:       Number of bytes written by the dump callback
+ */
+struct cam_hw_mini_dump_args {
+	void             *start_addr;
+	unsigned long     len;
+	unsigned long     bytes_written;
+};
+
 /**
  * cam_hw_mgr_intf - HW manager interface
  *
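
Together, cam_ctx_mini_dump_cb_func and cam_hw_mini_dump_args define the dump contract: the caller offers a bounded region, and the callback fills at most len bytes starting at start_addr and reports the amount consumed in bytes_written. A minimal conforming callback could look like this (sketch only; my_ctx_mini_dump_cb and my_ctx_snapshot are invented names for illustration):

	static int my_ctx_mini_dump_cb(void *context, void *args)
	{
		struct cam_hw_mini_dump_args *dump_args = args;
		struct my_ctx_snapshot *snap;  /* hypothetical payload */

		/* refuse rather than overrun when the region is too small */
		if (dump_args->len < sizeof(*snap)) {
			dump_args->bytes_written = 0;
			return 0;
		}

		snap = dump_args->start_addr;
		/* ... copy state from 'context' into *snap ... */
		dump_args->bytes_written = sizeof(*snap);
		return 0;
	}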

+ 185 - 1
drivers/cam_isp/cam_isp_context.c

@@ -263,6 +263,189 @@ static int __cam_isp_ctx_dump_event_record(
 	return 0;
 }
 
+static void __cam_isp_ctx_req_mini_dump(struct cam_ctx_request *req,
+	uint8_t *start_addr, uint8_t *end_addr,
+	unsigned long *bytes_updated)
+{
+	struct cam_isp_ctx_req_mini_dump *req_md;
+	struct cam_buf_io_cfg            *io_cfg;
+	struct cam_isp_ctx_req           *req_isp;
+	struct cam_packet                *packet = NULL;
+	unsigned long                     bytes_required = 0;
+
+	bytes_required = sizeof(*req_md);
+	*bytes_updated = 0;
+	if (start_addr + bytes_required > end_addr)
+		return;
+
+	req_md = (struct cam_isp_ctx_req_mini_dump *)start_addr;
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_md->num_acked = req_isp->num_acked;
+	req_md->num_deferred_acks = req_isp->num_deferred_acks;
+	req_md->bubble_report = req_isp->bubble_report;
+	req_md->bubble_detected = req_isp->bubble_detected;
+	req_md->reapply = req_isp->reapply;
+	req_md->request_id = req->request_id;
+	*bytes_updated += bytes_required;
+
+	if (req_isp->num_fence_map_out) {
+		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
+			req_isp->num_fence_map_out;
+		if (start_addr + *bytes_updated + bytes_required > end_addr)
+			return;
+
+		req_md->map_out = (struct cam_hw_fence_map_entry *)
+				((uint8_t *)start_addr + *bytes_updated);
+		memcpy(req_md->map_out, req_isp->fence_map_out, bytes_required);
+		req_md->num_fence_map_out = req_isp->num_fence_map_out;
+		*bytes_updated += bytes_required;
+	}
+
+	if (req_isp->num_fence_map_in) {
+		bytes_required = sizeof(struct cam_hw_fence_map_entry) *
+			req_isp->num_fence_map_in;
+		if (start_addr + *bytes_updated + bytes_required > end_addr)
+			return;
+
+		req_md->map_in = (struct cam_hw_fence_map_entry *)
+			((uint8_t *)start_addr + *bytes_updated);
+		memcpy(req_md->map_in, req_isp->fence_map_in, bytes_required);
+		req_md->num_fence_map_in = req_isp->num_fence_map_in;
+		*bytes_updated += bytes_required;
+	}
+
+	packet = req_isp->hw_update_data.packet;
+	if (packet && packet->num_io_configs) {
+		bytes_required = packet->num_io_configs * sizeof(struct cam_buf_io_cfg);
+		if (start_addr + *bytes_updated + bytes_required > end_addr)
+			return;
+
+		io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+			    packet->io_configs_offset / 4);
+		req_md->io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)start_addr + *bytes_updated);
+		memcpy(req_md->io_cfg, io_cfg, bytes_required);
+		*bytes_updated += bytes_required;
+		req_md->num_io_cfg = packet->num_io_configs;
+	}
+}
+
+static int __cam_isp_ctx_minidump_cb(void *priv, void *args)
+{
+	struct cam_isp_ctx_mini_dump_info *md;
+	struct cam_isp_context            *ctx_isp;
+	struct cam_context                *ctx;
+	struct cam_ctx_request            *req, *req_temp;
+	struct cam_hw_mini_dump_args      *dump_args;
+	uint8_t                           *start_addr;
+	uint8_t                           *end_addr;
+	unsigned long                      total_bytes = 0;
+	unsigned long                      bytes_updated = 0;
+	uint32_t                           i;
+
+	if (!priv || !args) {
+		CAM_ERR(CAM_ISP, "invalid params");
+		return 0;
+	}
+
+	dump_args = (struct cam_hw_mini_dump_args *)args;
+	if (dump_args->len < sizeof(*md)) {
+		CAM_ERR(CAM_ISP,
+			"Insufficient size received %lu, required size: %zu",
+			dump_args->len, sizeof(*md));
+		return 0;
+	}
+
+	ctx = (struct cam_context *)priv;
+	ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
+	start_addr = (uint8_t *)dump_args->start_addr;
+	end_addr = start_addr + dump_args->len;
+	md = (struct cam_isp_ctx_mini_dump_info *)dump_args->start_addr;
+
+	md->sof_timestamp_val = ctx_isp->sof_timestamp_val;
+	md->boot_timestamp = ctx_isp->boot_timestamp;
+	md->last_sof_timestamp = ctx_isp->last_sof_timestamp;
+	md->init_timestamp = ctx_isp->init_timestamp;
+	md->frame_id = ctx_isp->frame_id;
+	md->reported_req_id = ctx_isp->reported_req_id;
+	md->last_applied_req_id = ctx_isp->last_applied_req_id;
+	md->last_bufdone_err_apply_req_id =
+		ctx_isp->last_bufdone_err_apply_req_id;
+	md->frame_id_meta = ctx_isp->frame_id_meta;
+	md->substate_activated = ctx_isp->substate_activated;
+	md->ctx_id = ctx->ctx_id;
+	md->subscribe_event = ctx_isp->subscribe_event;
+	md->bubble_frame_cnt = ctx_isp->bubble_frame_cnt;
+	md->isp_device_type = ctx_isp->isp_device_type;
+	md->active_req_cnt = ctx_isp->active_req_cnt;
+	md->trigger_id = ctx_isp->trigger_id;
+	md->rdi_only_context = ctx_isp->rdi_only_context;
+	md->offline_context = ctx_isp->offline_context;
+	md->hw_acquired = ctx_isp->hw_acquired;
+	md->init_received = ctx_isp->init_received;
+	md->split_acquire = ctx_isp->split_acquire;
+	md->use_frame_header_ts = ctx_isp->use_frame_header_ts;
+	md->support_consumed_addr = ctx_isp->support_consumed_addr;
+	md->use_default_apply = ctx_isp->use_default_apply;
+	md->apply_in_progress = atomic_read(&ctx_isp->apply_in_progress);
+	md->process_bubble = atomic_read(&ctx_isp->process_bubble);
+	md->rxd_epoch = atomic_read(&ctx_isp->rxd_epoch);
+
+	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
+		memcpy(md->event_record[i], ctx_isp->event_record[i],
+			sizeof(struct cam_isp_context_event_record) *
+			CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
+	}
+
+	total_bytes += sizeof(*md);
+	if (start_addr + total_bytes >= end_addr)
+		goto end;
+
+	if (!list_empty(&ctx->active_req_list)) {
+		md->active_list = (struct cam_isp_ctx_req_mini_dump *)
+			    (start_addr + total_bytes);
+		list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
+			bytes_updated = 0;
+			 __cam_isp_ctx_req_mini_dump(req,
+				(uint8_t *)&md->active_list[md->active_cnt++],
+				end_addr, &bytes_updated);
+			total_bytes +=  bytes_updated;
+			if ((start_addr + total_bytes >= end_addr))
+				goto end;
+		}
+	}
+
+	if (!list_empty(&ctx->wait_req_list)) {
+		md->wait_list = (struct cam_isp_ctx_req_mini_dump *)
+			(start_addr + total_bytes);
+		list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
+			bytes_updated = 0;
+			__cam_isp_ctx_req_mini_dump(req,
+				(uint8_t *)&md->wait_list[md->wait_cnt++],
+				end_addr, &bytes_updated);
+			total_bytes +=  bytes_updated;
+			if ((start_addr + total_bytes >= end_addr))
+				goto end;
+		}
+	}
+
+	if (!list_empty(&ctx->pending_req_list)) {
+		md->pending_list = (struct cam_isp_ctx_req_mini_dump *)
+			(start_addr + total_bytes);
+		list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
+			bytes_updated = 0;
+			__cam_isp_ctx_req_mini_dump(req,
+				(uint8_t *)&md->pending_list[md->pending_cnt++],
+				end_addr, &bytes_updated);
+			total_bytes +=  bytes_updated;
+			if ((start_addr + total_bytes >= end_addr))
+				goto end;
+		}
+	}
+end:
+	dump_args->bytes_written = total_bytes;
+	return 0;
+}
+
 static void __cam_isp_ctx_update_state_monitor_array(
 	struct cam_isp_context *ctx_isp,
 	enum cam_isp_state_change_trigger trigger_type,
@@ -5277,7 +5460,6 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	ctx_isp->hw_acquired = true;
 	ctx_isp->split_acquire = false;
 	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
-
 	atomic64_set(&ctx_isp->state_monitor_head, -1);
 	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
 		atomic64_set(&ctx_isp->event_record_head[i], -1);
@@ -5384,6 +5566,7 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
 	param.num_acq = CAM_API_COMPAT_CONSTANT;
 	param.acquire_info_size = cmd->data_size;
 	param.acquire_info = (uint64_t) acquire_hw_info;
+	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;
 
 	rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
 		&param);
@@ -5535,6 +5718,7 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
 	param.num_acq = CAM_API_COMPAT_CONSTANT;
 	param.acquire_info_size = cmd->data_size;
 	param.acquire_info = (uint64_t) acquire_hw_info;
+	param.mini_dump_cb = __cam_isp_ctx_minidump_cb;
 
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
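
The callback above packs one fixed cam_isp_ctx_mini_dump_info header and then appends variable-length per-request records for the active, wait and pending lists; every pointer stored in the header references this same flat buffer, not live kernel objects. An approximate layout sketch (illustrative only; exact offsets depend on each request's fence-map and io-config payloads):

	/*
	 * dump_args->start_addr
	 *   struct cam_isp_ctx_mini_dump_info      <- md (fixed header)
	 *   per active request:
	 *       struct cam_isp_ctx_req_mini_dump
	 *       + copies of map_out / map_in / io_cfg
	 *   wait and pending requests follow in the same record format
	 * dump_args->start_addr + bytes_written
	 */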

+ 119 - 0
drivers/cam_isp/cam_isp_context.h

@@ -345,6 +345,125 @@ struct cam_isp_context_dump_header {
 	uint32_t  word_size;
 };
 
+/**
+ * struct cam_isp_ctx_req_mini_dump - ISP context mini dump request entry
+ *
+ * @map_out:                   Output fence mapping
+ * @map_in:                    Input fence mapping
+ * @io_cfg:                    IO buffer configuration
+ * @request_id:                Request ID
+ * @num_fence_map_out:         Number of the output fence map
+ * @num_fence_map_in:          Number of input fence map
+ * @num_io_cfg:                Number of ISP hardware configuration entries
+ * @num_acked:                 Count to track acked entries for output.
+ * @num_deferred_acks:         Number of buf_dones/acks whose handling is
+ *                             deferred or that are signalled in special
+ *                             scenarios. This count is incremented instead
+ *                             of num_acked, and num_acked is updated later
+ *                             when the deferred events are handled.
+ * @bubble_report:             Flag to track if bubble report is active on
+ *                             current request
+ * @bubble_detected:           Flag to track if bubble is detected
+ * @reapply:                   True if reapplying after bubble
+ * @cdm_reset_before_apply:    Set to true for bubble re-apply when buf done
+ *                             is not coming
+ *
+ */
+struct cam_isp_ctx_req_mini_dump {
+	struct cam_hw_fence_map_entry   *map_out;
+	struct cam_hw_fence_map_entry   *map_in;
+	struct cam_buf_io_cfg           *io_cfg;
+	uint64_t                         request_id;
+	uint8_t                          num_fence_map_in;
+	uint8_t                          num_fence_map_out;
+	uint8_t                          num_io_cfg;
+	uint8_t                          num_acked;
+	uint8_t                          num_deferred_acks;
+	bool                             bubble_report;
+	bool                             bubble_detected;
+	bool                             reapply;
+	bool                             cdm_reset_before_apply;
+};
+
+/**
+ * struct cam_isp_ctx_mini_dump_info - ISP context mini dump data
+ *
+ * @active_list:               Active Req list
+ * @pending_list:              Pending req list
+ * @wait_list:                 Wait Req List
+ * @event_record:              Event record
+ * @sof_timestamp_val:         Captured time stamp value at sof hw event
+ * @boot_timestamp:            Boot time stamp for a given req_id
+ * @last_sof_timestamp:        SOF timestamp of the last frame
+ * @init_timestamp:            Timestamp at which this context is initialized
+ * @frame_id:                  Frame id read every epoch for the ctx
+ * @reported_req_id:           Last reported request id
+ * @last_applied_req_id:       Last applied request id
+ * @frame_id_meta:             Frame id for meta
+ * @ctx_id:                    Context id
+ * @subscribe_event:           The IRQ event mask that CRM subscribes to; IFE
+ *                             will invoke the CRM callback at those events.
+ * @bubble_frame_cnt:          Count of the frame after bubble
+ * @isp_device_type:           ISP device type
+ * @active_req_cnt:            Counter for the active request
+ * @trigger_id:                ID provided by CRM for each ctx on the link
+ * @substate_activated:        Current substate for the activated state.
+ * @rxd_epoch:                 Indicate whether epoch has been received. Used to
+ *                             decide whether to apply request in offline ctx
+ * @process_bubble:            Atomic variable to check if ctx is still
+ *                             processing bubble.
+ * @apply_in_progress:         Whether request apply is in progress
+ * @rdi_only_context:          Get context type information.
+ *                             true, if context is rdi only context
+ * @offline_context:           Indicate whether context is for offline IFE
+ * @hw_acquired:               Indicate whether HW resources are acquired
+ * @init_received:             Indicate whether init config packet is received
+ * @split_acquire:             Indicate whether a separate acquire is expected
+ * @custom_enabled:            Custom HW enabled for this ctx
+ * @use_frame_header_ts:       Use frame header for qtimer ts
+ * @support_consumed_addr:     Indicate whether HW has last consumed addr reg
+ * @use_default_apply:         Use default settings in case of frame skip
+ *
+ */
+struct cam_isp_ctx_mini_dump_info {
+	struct cam_isp_ctx_req_mini_dump      *active_list;
+	struct cam_isp_ctx_req_mini_dump      *pending_list;
+	struct cam_isp_ctx_req_mini_dump      *wait_list;
+	struct cam_isp_context_event_record    event_record[
+		CAM_ISP_CTX_EVENT_MAX][CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES];
+	uint64_t                               sof_timestamp_val;
+	uint64_t                               boot_timestamp;
+	uint64_t                               last_sof_timestamp;
+	uint64_t                               init_timestamp;
+	int64_t                                frame_id;
+	int64_t                                reported_req_id;
+	int64_t                                last_applied_req_id;
+	int64_t                                last_bufdone_err_apply_req_id;
+	uint32_t                               frame_id_meta;
+	uint8_t                                ctx_id;
+	uint8_t                                subscribe_event;
+	uint8_t                                bubble_frame_cnt;
+	uint8_t                                isp_device_type;
+	uint8_t                                active_req_cnt;
+	uint8_t                                trigger_id;
+	uint8_t                                substate_activated;
+	uint8_t                                rxd_epoch;
+	uint8_t                                process_bubble;
+	uint8_t                                active_cnt;
+	uint8_t                                pending_cnt;
+	uint8_t                                wait_cnt;
+	bool                                   apply_in_progress;
+	bool                                   rdi_only_context;
+	bool                                   offline_context;
+	bool                                   hw_acquired;
+	bool                                   init_received;
+	bool                                   split_acquire;
+	bool                                   custom_enabled;
+	bool                                   use_frame_header_ts;
+	bool                                   support_consumed_addr;
+	bool                                   use_default_apply;
+};
+
 /**
  * cam_isp_context_init()
  *

+ 120 - 1
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -4701,6 +4701,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	ife_ctx->ctx_type = CAM_IFE_CTX_TYPE_NONE;
 
 	ife_ctx->common.cb_priv = acquire_args->context_data;
+	ife_ctx->common.mini_dump_cb = acquire_args->mini_dump_cb;
 	ife_ctx->flags.internal_cdm = false;
 	ife_ctx->common.event_cb = acquire_args->event_cb;
 	ife_ctx->hw_mgr = ife_hw_mgr;
@@ -10526,7 +10527,8 @@ mid_check:
 		get_res.out_res_id, ctx->ctx_index, packet->header.request_id);
 	*resource_type = get_res.out_res_id;
 	ctx->flags.pf_mid_found = true;
-
+	ctx->pf_info.mid = get_res.mid;
+	ctx->pf_info.out_port_id = get_res.out_res_id;
 	cam_ife_mgr_pf_dump(get_res.out_res_id, ctx);
 
 outportlog:
@@ -12221,6 +12223,121 @@ static void cam_req_mgr_process_workq_cam_ife_worker(struct work_struct *w)
 	cam_req_mgr_process_workq(w);
 }
 
+static unsigned long cam_ife_hw_mgr_mini_dump_cb(void *dst, unsigned long len)
+{
+	struct cam_ife_hw_mini_dump_data   *mgr_md;
+	struct cam_ife_hw_mini_dump_ctx    *ctx_md;
+	struct cam_ife_hw_mgr_ctx          *ctx_temp;
+	struct cam_ife_hw_mgr_ctx          *ctx;
+	uint32_t                            j;
+	uint32_t                            hw_idx = 0;
+	struct cam_hw_intf                 *hw_intf = NULL;
+	struct cam_ife_hw_mgr              *hw_mgr;
+	struct cam_hw_mini_dump_args        hw_dump_args;
+	unsigned long                       remain_len = len;
+	unsigned long                       dumped_len = 0;
+	uint32_t                            i = 0;
+	int                                 rc = 0;
+
+	if (len < sizeof(*mgr_md)) {
+		CAM_ERR(CAM_ISP, "Insufficient received length: %lu", len);
+		return 0;
+	}
+
+	mgr_md = (struct cam_ife_hw_mini_dump_data *)dst;
+	mgr_md->num_ctx = 0;
+	hw_mgr = &g_ife_hw_mgr;
+	dumped_len += sizeof(*mgr_md);
+	remain_len -= dumped_len;
+
+	list_for_each_entry_safe(ctx, ctx_temp,
+		&hw_mgr->used_ctx_list, list) {
+
+		if (remain_len < sizeof(*ctx_md)) {
+			CAM_ERR(CAM_ISP,
+			"Insufficient received length: %lu, dumped_len: %lu",
+			len, dumped_len);
+			goto end;
+		}
+
+		ctx_md = (struct cam_ife_hw_mini_dump_ctx *)
+				((uint8_t *)dst + dumped_len);
+		mgr_md->ctx[i] = ctx_md;
+		ctx_md->ctx_index = ctx->ctx_index;
+		ctx_md->left_hw_idx = ctx->left_hw_idx;
+		ctx_md->right_hw_idx = ctx->right_hw_idx;
+		ctx_md->cdm_handle = ctx->cdm_handle;
+		ctx_md->num_base = ctx->num_base;
+		ctx_md->cdm_id = ctx->cdm_id;
+		ctx_md->last_cdm_done_req = ctx->last_cdm_done_req;
+		ctx_md->applied_req_id = ctx->applied_req_id;
+		ctx_md->ctx_type = ctx->ctx_type;
+		ctx_md->overflow_pending =
+			atomic_read(&ctx->overflow_pending);
+		ctx_md->cdm_done = atomic_read(&ctx->cdm_done);
+		memcpy(&ctx_md->pf_info, &ctx->pf_info,
+			sizeof(struct cam_ife_hw_mgr_ctx_pf_info));
+		memcpy(&ctx_md->flags, &ctx->flags,
+			sizeof(struct cam_ife_hw_mgr_ctx_flags));
+
+		dumped_len += sizeof(*ctx_md);
+
+		for (j = 0; j < ctx->num_base; j++) {
+			memcpy(&ctx_md->base[j], &ctx->base[j],
+				sizeof(struct cam_isp_ctx_base_info));
+			hw_idx = ctx->base[j].idx;
+			if (ctx->base[j].hw_type == CAM_ISP_HW_TYPE_CSID) {
+				hw_intf = hw_mgr->csid_devices[hw_idx];
+				ctx_md->csid_md[hw_idx] = (void *)((uint8_t *)dst + dumped_len);
+				memset(&hw_dump_args, 0, sizeof(hw_dump_args));
+				hw_dump_args.start_addr = ctx_md->csid_md[hw_idx];
+				hw_dump_args.len = remain_len;
+				hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+					CAM_ISP_HW_CSID_MINI_DUMP, &hw_dump_args,
+					sizeof(hw_dump_args));
+				if (hw_dump_args.bytes_written == 0)
+					goto end;
+				dumped_len += hw_dump_args.bytes_written;
+				remain_len = len - dumped_len;
+			} else if (ctx->base[j].hw_type ==
+				CAM_ISP_HW_TYPE_VFE) {
+				hw_intf = hw_mgr->ife_devices[hw_idx]->hw_intf;
+				ctx_md->vfe_md[hw_idx] = (void *)((uint8_t *)dst + dumped_len);
+				memset(&hw_dump_args, 0, sizeof(hw_dump_args));
+				hw_dump_args.start_addr = ctx_md->vfe_md[hw_idx];
+				hw_dump_args.len = remain_len;
+				hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+					CAM_ISP_HW_BUS_MINI_DUMP, &hw_dump_args,
+					sizeof(hw_dump_args));
+				if (hw_dump_args.bytes_written == 0)
+					goto end;
+				dumped_len += hw_dump_args.bytes_written;
+				remain_len = len - dumped_len;
+			}
+		}
+
+		if (ctx->common.mini_dump_cb) {
+			hw_dump_args.start_addr = (void *)((uint8_t *)dst + dumped_len);
+			hw_dump_args.len = remain_len;
+			hw_dump_args.bytes_written = 0;
+			rc = ctx->common.mini_dump_cb(ctx->common.cb_priv, &hw_dump_args);
+			if (rc || (hw_dump_args.bytes_written + dumped_len > len))
+				goto end;
+
+			ctx_md->ctx_priv = hw_dump_args.start_addr;
+			dumped_len += hw_dump_args.bytes_written;
+			remain_len = len - dumped_len;
+		}
+
+		i++;
+	}
+end:
+	mgr_md->num_ctx = i;
+	return dumped_len;
+}
+
 int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 {
 	int rc = -EFAULT;
@@ -12483,6 +12600,8 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 
 	cam_ife_hw_mgr_debug_register();
 	cam_ife_mgr_count_ife();
+	cam_common_register_mini_dump_cb(cam_ife_hw_mgr_mini_dump_cb,
+		"CAM_ISP");
 
 	CAM_DBG(CAM_ISP, "Exit");
 
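cam_common_register_mini_dump_cb itself is outside this diff; every call site passes a handler of type unsigned long (*)(void *dst, unsigned long len) plus a component name, so the common layer presumably keeps a small table of named handlers that the VA minidump core walks at panic time. A hedged sketch of such a registry (illustrative only, not the actual camera-common implementation; table depth and name length are assumptions):

	typedef unsigned long (*cam_common_mini_dump_cb)(void *dst,
		unsigned long len);

	struct cam_common_mini_dump_entry {
		cam_common_mini_dump_cb  cb;
		char                     name[32];  /* assumed length */
	};

	static struct cam_common_mini_dump_entry g_md_tbl[8]; /* assumed depth */
	static int g_md_cnt;

	int cam_common_register_mini_dump_cb(
		cam_common_mini_dump_cb cb, const char *name)
	{
		if (g_md_cnt >= ARRAY_SIZE(g_md_tbl))
			return -ENOMEM;

		g_md_tbl[g_md_cnt].cb = cb;
		strscpy(g_md_tbl[g_md_cnt].name, name,
			sizeof(g_md_tbl[g_md_cnt].name));
		g_md_cnt++;
		return 0;
	}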

+ 109 - 42
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h

@@ -69,6 +69,17 @@ struct cam_ife_hw_mgr_debug {
 	bool           disable_ife_mmu_prefetch;
 };
 
+/**
+ * struct cam_ife_hw_mgr_ctx_pf_info - pf buf info
+ *
+ * @out_port_id: Out port id
+ * @mid: MID value
+ */
+struct cam_ife_hw_mgr_ctx_pf_info {
+	uint32_t       out_port_id;
+	uint32_t       mid;
+};
+
 /**
  * struct cam_sfe_scratch_buf_info - Scratch buf info
  *
@@ -218,54 +229,55 @@ struct cam_ife_hw_mgr_ctx_flags {
  *
  */
 struct cam_ife_hw_mgr_ctx {
-	struct list_head                list;
-	struct cam_isp_hw_mgr_ctx       common;
+	struct list_head                   list;
+	struct cam_isp_hw_mgr_ctx          common;
 
-	uint32_t                        ctx_index;
-	uint32_t                        left_hw_idx;
-	uint32_t                        right_hw_idx;
-	struct cam_ife_hw_mgr          *hw_mgr;
+	uint32_t                          ctx_index;
+	uint32_t                          left_hw_idx;
+	uint32_t                          right_hw_idx;
+	struct cam_ife_hw_mgr            *hw_mgr;
 
-	struct cam_isp_hw_mgr_res       res_list_ife_in;
-	struct list_head                res_list_ife_csid;
-	struct list_head                res_list_ife_src;
-	struct list_head                res_list_sfe_src;
-	struct list_head                res_list_ife_in_rd;
-	struct cam_isp_hw_mgr_res      *res_list_ife_out;
-	struct cam_isp_hw_mgr_res       res_list_sfe_out[
+	struct cam_isp_hw_mgr_res         res_list_ife_in;
+	struct list_head                  res_list_ife_csid;
+	struct list_head                  res_list_ife_src;
+	struct list_head                  res_list_sfe_src;
+	struct list_head                  res_list_ife_in_rd;
+	struct cam_isp_hw_mgr_res        *res_list_ife_out;
+	struct cam_isp_hw_mgr_res         res_list_sfe_out[
 						CAM_SFE_HW_OUT_RES_MAX];
-	struct list_head                free_res_list;
-	struct cam_isp_hw_mgr_res       res_pool[CAM_IFE_HW_RES_POOL_MAX];
+	struct list_head                  free_res_list;
+	struct cam_isp_hw_mgr_res         res_pool[CAM_IFE_HW_RES_POOL_MAX];
 
-	uint32_t                        irq_status0_mask[CAM_IFE_HW_NUM_MAX];
-	uint32_t                        irq_status1_mask[CAM_IFE_HW_NUM_MAX];
-	struct cam_isp_ctx_base_info    base[CAM_IFE_HW_NUM_MAX +
+	uint32_t                          irq_status0_mask[CAM_IFE_HW_NUM_MAX];
+	uint32_t                          irq_status1_mask[CAM_IFE_HW_NUM_MAX];
+	struct cam_isp_ctx_base_info      base[CAM_IFE_HW_NUM_MAX +
 						CAM_SFE_HW_NUM_MAX];
-	uint32_t                        num_base;
-	uint32_t                        cdm_handle;
-	struct cam_cdm_utils_ops       *cdm_ops;
-	struct cam_cdm_bl_request      *cdm_cmd;
-	enum cam_cdm_id                 cdm_id;
-	uint32_t                        sof_cnt[CAM_IFE_HW_NUM_MAX];
-	uint32_t                        epoch_cnt[CAM_IFE_HW_NUM_MAX];
-	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
-	atomic_t                        overflow_pending;
-	atomic_t                        cdm_done;
-	uint64_t                        last_cdm_done_req;
-	struct completion               config_done_complete;
-	uint32_t                        hw_version;
-	struct cam_cmd_buf_desc         reg_dump_buf_desc[
+	uint32_t                          num_base;
+	uint32_t                          cdm_handle;
+	struct cam_cdm_utils_ops         *cdm_ops;
+	struct cam_cdm_bl_request        *cdm_cmd;
+	enum cam_cdm_id                   cdm_id;
+	uint32_t                          sof_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                          epoch_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                          eof_cnt[CAM_IFE_HW_NUM_MAX];
+	atomic_t                          overflow_pending;
+	atomic_t                          cdm_done;
+	uint64_t                          last_cdm_done_req;
+	struct completion                 config_done_complete;
+	uint32_t                          hw_version;
+	struct cam_cmd_buf_desc           reg_dump_buf_desc[
 						CAM_REG_DUMP_MAX_BUF_ENTRIES];
-	uint32_t                        num_reg_dump_buf;
-	uint64_t                        applied_req_id;
-	enum cam_ife_ctx_master_type    ctx_type;
-	uint32_t                        ctx_config;
-	struct timespec64               ts;
-	void                           *buf_done_controller;
-	struct cam_ife_hw_mgr_sfe_info  sfe_info;
-	struct cam_ife_hw_mgr_ctx_flags flags;
-	uint32_t                        bw_config_version;
-	atomic_t                        recovery_id;
+	uint32_t                          num_reg_dump_buf;
+	uint64_t                          applied_req_id;
+	enum cam_ife_ctx_master_type      ctx_type;
+	uint32_t                          ctx_config;
+	struct timespec64                 ts;
+	void                             *buf_done_controller;
+	struct cam_ife_hw_mgr_sfe_info    sfe_info;
+	struct cam_ife_hw_mgr_ctx_flags   flags;
+	struct cam_ife_hw_mgr_ctx_pf_info pf_info;
+	uint32_t                          bw_config_version;
+	atomic_t                          recovery_id;
 };
 
 /**
@@ -365,6 +377,61 @@ struct cam_ife_hw_event_recovery_data {
 	uint32_t                   id[CAM_IFE_CTX_MAX];
 };
 
+/**
+ * struct cam_ife_hw_mini_dump_ctx - Mini dump data
+ *
+ * @base:                   Device base index array containing all IFE HW info
+ * @pf_info:                Page Fault Info
+ * @csid_md:                CSID mini dump data
+ * @vfe_md:                 VFE mini dump data
+ * @flags:                  Flags pertaining to this ctx
+ * @ctx_priv:               Context layer (ISP context) mini dump data
+ * @last_cdm_done_req:      Last cdm done request
+ * @applied_req_id:         Last request id to be applied
+ * @cdm_handle:             cdm hw acquire handle
+ * @ctx_index:              acquired context id.
+ * @left_hw_idx:            hw index for master core [left]
+ * @right_hw_idx:           hw index for slave core [right]
+ * @num_base:               number of valid base data in the base array
+ * @cdm_id:                 cdm id of the acquired cdm
+ * @ctx_type:               Type of IFE ctx [CUSTOM/SFE etc.]
+ * @overflow_pending:       Flag to specify whether an overflow is pending for
+ *                          this context
+ * @cdm_done:               flag to indicate cdm has finished writing shadow
+ *                          registers
+ */
+struct cam_ife_hw_mini_dump_ctx {
+	struct cam_isp_ctx_base_info          base[CAM_IFE_HW_NUM_MAX +
+						CAM_SFE_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx_pf_info     pf_info;
+	void                                 *csid_md[CAM_IFE_HW_NUM_MAX];
+	void                                 *vfe_md[CAM_IFE_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx_flags       flags;
+	void                                 *ctx_priv;
+	uint64_t                              last_cdm_done_req;
+	uint64_t                              applied_req_id;
+	uint32_t                              cdm_handle;
+	uint8_t                               ctx_index;
+	uint8_t                               left_hw_idx;
+	uint8_t                               right_hw_idx;
+	uint8_t                               num_base;
+	enum cam_cdm_id                       cdm_id;
+	enum cam_ife_ctx_master_type          ctx_type;
+	bool                                  overflow_pending;
+	bool                                  cdm_done;
+};
+
+/**
+ * struct cam_ife_hw_mini_dump_data - Mini dump data
+ *
+ * @num_ctx:                  Number of contexts dumped
+ * @ctx:                      Array of context
+ *
+ */
+struct cam_ife_hw_mini_dump_data {
+	uint32_t                            num_ctx;
+	struct cam_ife_hw_mini_dump_ctx    *ctx[CAM_IFE_CTX_MAX];
+};
+
 /**
  * cam_ife_hw_mgr_init()
  *

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h

@@ -21,12 +21,14 @@
  *                         acquire device
  * @cb_priv:               first argument for the call back function
  *                         set during acquire device
+ * @mini_dump_cb:          Callback for mini dump
  *
  */
 struct cam_isp_hw_mgr_ctx {
 	void                           *tasklet_info;
 	cam_hw_event_cb_func            event_cb;
 	void                           *cb_priv;
+	cam_ctx_mini_dump_cb_func       mini_dump_cb;
 };
 
 /**

+ 46 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.c

@@ -4676,6 +4676,49 @@ static int cam_ife_csid_ver2_set_csid_clock(
 	return 0;
 }
 
+static int cam_ife_csid_ver2_mini_dump(
+	struct cam_ife_csid_ver2_hw  *csid_hw,
+	void *cmd_args)
+{
+	struct cam_ife_csid_ver2_mini_dump_data *md;
+	uint32_t                                 i  = 0;
+	struct cam_ife_csid_ver2_path_cfg       *path_cfg;
+	struct cam_ife_csid_ver2_res_mini_dump  *md_res;
+	struct cam_isp_resource_node            *res;
+	struct cam_hw_mini_dump_args            *md_args;
+
+	md_args = (struct cam_hw_mini_dump_args *)cmd_args;
+	if (md_args->len < sizeof(*md)) {
+		md_args->bytes_written = 0;
+		return 0;
+	}
+
+	md  = (struct cam_ife_csid_ver2_mini_dump_data *)
+		    ((uint8_t *)md_args->start_addr);
+	md->clk_rate = csid_hw->clk_rate;
+	md->hw_state = csid_hw->hw_info->hw_state;
+
+	for (i = 0; i < CAM_IFE_PIX_PATH_RES_MAX; i++) {
+		res = &csid_hw->path_res[i];
+		path_cfg = (struct cam_ife_csid_ver2_path_cfg *)res->res_priv;
+		if (!path_cfg)
+			continue;
+
+		md_res = &md->res[i];
+		md_res->res_id = res->res_id;
+		scnprintf(md_res->res_name, CAM_ISP_RES_NAME_LEN, "%s",
+			res->res_name);
+		memcpy(&md_res->path_cfg, path_cfg, sizeof(*path_cfg));
+	}
+
+	memcpy(&md->rx_cfg, &csid_hw->rx_cfg, sizeof(struct cam_ife_csid_rx_cfg));
+	memcpy(&md->flags, &csid_hw->flags, sizeof(struct cam_ife_csid_hw_flags));
+	memcpy(md->cid_data, csid_hw->cid_data,
+		sizeof(struct cam_ife_csid_cid_data) * CAM_IFE_CSID_CID_MAX);
+	md_args->bytes_written = sizeof(*md);
+
+	return 0;
+}
+
 static int cam_ife_csid_ver2_dual_sync_cfg(
 	struct cam_ife_csid_ver2_hw  *csid_hw,
 	void *cmd_args)
@@ -4810,6 +4853,9 @@ static int cam_ife_csid_ver2_process_cmd(void *hw_priv,
 		rc = cam_ife_csid_ver2_dual_sync_cfg(csid_hw,
 			cmd_args);
 		break;
+	case CAM_ISP_HW_CSID_MINI_DUMP:
+		rc = cam_ife_csid_ver2_mini_dump(csid_hw, cmd_args);
+		break;
 	case CAM_IFE_CSID_PROGRAM_OFFLINE_CMD:
 		rc = cam_ife_csid_ver2_program_offline_go_cmd(
 			csid_hw, cmd_args, arg_size);

+ 31 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_hw_ver2.h

@@ -578,6 +578,37 @@ struct cam_ife_csid_ver2_hw {
 	atomic_t                               discard_frame_per_path;
 };
 
+/*
+ * struct cam_ife_csid_ver2_res_mini_dump: CSID res mini dump place holder
+ * @res_id:      Res id
+ * @res_name:    Res name
+ * @path_cfg:    path configuration
+ */
+struct cam_ife_csid_ver2_res_mini_dump {
+	uint32_t                           res_id;
+	uint8_t                            res_name[CAM_ISP_RES_NAME_LEN];
+	struct cam_ife_csid_ver2_path_cfg  path_cfg;
+};
+
+/*
+ * struct cam_ife_csid_ver2_mini_dump_data: CSID mini dump place holder
+ *
+ * @res:             Mini dump data for res
+ * @flags:           Flags
+ * @rx_cfg:          Rx configuration
+ * @cid_data:        CID data
+ * @clk_rate:        Clock Rate
+ * @hw_state:        hw state
+ */
+struct cam_ife_csid_ver2_mini_dump_data {
+	struct cam_ife_csid_ver2_res_mini_dump  res[CAM_IFE_PIX_PATH_RES_MAX];
+	struct cam_ife_csid_hw_flags            flags;
+	struct cam_ife_csid_rx_cfg              rx_cfg;
+	struct cam_ife_csid_cid_data            cid_data[CAM_IFE_CSID_CID_MAX];
+	uint64_t                                clk_rate;
+	uint8_t                                 hw_state;
+};
+
 int cam_ife_csid_hw_ver2_init(struct cam_hw_intf  *csid_hw_intf,
 	struct cam_ife_csid_core_info *csid_core_info,
 	bool is_custom);

+ 2 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h

@@ -198,6 +198,8 @@ enum cam_isp_hw_cmd_type {
 	CAM_ISP_HW_CMD_RM_ENABLE_DISABLE,
 	CAM_ISP_HW_CMD_APPLY_CLK_BW_UPDATE,
 	CAM_ISP_HW_CMD_INIT_CONFIG_UPDATE,
+	CAM_ISP_HW_CSID_MINI_DUMP,
+	CAM_ISP_HW_BUS_MINI_DUMP,
 	CAM_ISP_HW_CMD_MAX,
 };
 

+ 1 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c

@@ -535,6 +535,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
 	case CAM_ISP_HW_CMD_QUERY_BUS_CAP:
 	case CAM_ISP_HW_CMD_IFE_BUS_DEBUG_CFG:
 	case CAM_ISP_HW_CMD_WM_BW_LIMIT_CONFIG:
+	case CAM_ISP_HW_BUS_MINI_DUMP:
 		rc = core_info->vfe_bus->hw_ops.process_cmd(
 			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
 			arg_size);

+ 76 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c

@@ -2577,6 +2577,75 @@ static void cam_vfe_bus_ver3_print_wm_info(
 		addr_status3);
 }
 
+static int cam_vfe_bus_ver3_mini_dump(
+	struct cam_vfe_bus_ver3_priv *bus_priv,
+	void *cmd_args)
+{
+	struct cam_isp_resource_node              *rsrc_node = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data      *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data  *wm        = NULL;
+	struct cam_vfe_bus_ver3_mini_dump_data    *md;
+	struct cam_vfe_bus_ver3_wm_mini_dump      *md_wm;
+	struct cam_hw_mini_dump_args              *md_args;
+	struct cam_hw_info                        *hw_info = NULL;
+	uint32_t                                   bytes_written = 0;
+	uint32_t                                   i, j, k = 0;
+
+	if (!bus_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid bus private data");
+		return -EINVAL;
+	}
+
+	hw_info = (struct cam_hw_info *)bus_priv->common_data.hw_intf->hw_priv;
+	md_args = (struct cam_hw_mini_dump_args *)cmd_args;
+
+	if (sizeof(*md) > md_args->len) {
+		md_args->bytes_written = 0;
+		return 0;
+	}
+
+	md = (struct cam_vfe_bus_ver3_mini_dump_data *)md_args->start_addr;
+	md->clk_rate = hw_info->soc_info.applied_src_clk_rate;
+	md->hw_idx = bus_priv->common_data.hw_intf->hw_idx;
+	md->hw_state = hw_info->hw_state;
+	bytes_written += sizeof(*md);
+	md->wm = (struct cam_vfe_bus_ver3_wm_mini_dump *)
+			((uint8_t *)md_args->start_addr + bytes_written);
+
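+	/*
+	 * WM records form a variable-length array packed into the same
+	 * flat buffer right after the fixed header, so md->wm simply
+	 * points past sizeof(*md).
+	 */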
+	for (i = 0; i < bus_priv->num_out; i++) {
+		rsrc_node = &bus_priv->vfe_out[i];
+		rsrc_data = rsrc_node->res_priv;
+		if (!rsrc_data)
+			continue;
+
+		for (j = 0; j < rsrc_data->num_wm; j++) {
+			if (bytes_written + sizeof(*md_wm) > md_args->len)
+				goto end;
+
+			md_wm = &md->wm[k];
+			wm = rsrc_data->wm_res[j].res_priv;
+			md_wm->width  = wm->width;
+			md_wm->index  = wm->index;
+			md_wm->height = wm->height;
+			md_wm->stride = wm->stride;
+			md_wm->en_cfg = wm->en_cfg;
+			md_wm->h_init = wm->h_init;
+			md_wm->format = wm->format;
+			md_wm->acquired_width = wm->acquired_width;
+			md_wm->acquired_height = wm->acquired_height;
+			md_wm->state = rsrc_node->res_state;
+			scnprintf(md_wm->name, CAM_ISP_RES_NAME_LEN,
+				"%s", rsrc_data->wm_res[j].res_name);
+			k++;
+			bytes_written += sizeof(*md_wm);
+		}
+	}
+end:
+	md->num_client = k;
+	md_args->bytes_written = bytes_written;
+	return 0;
+}
+
 static int cam_vfe_bus_ver3_print_dimensions(
 	uint32_t                                   res_id,
 	struct cam_vfe_bus_ver3_priv              *bus_priv)
@@ -3871,6 +3940,13 @@ static int cam_vfe_bus_ver3_process_cmd(
 			event_info->res_id, bus_priv);
 		break;
 		}
+	case CAM_ISP_HW_BUS_MINI_DUMP: {
+		bus_priv = (struct cam_vfe_bus_ver3_priv *)priv;
+
+		rc = cam_vfe_bus_ver3_mini_dump(bus_priv, cmd_args);
+		break;
+		}
+
 	case CAM_ISP_HW_CMD_UBWC_UPDATE_V2:
 		rc = cam_vfe_bus_ver3_update_ubwc_config_v2(cmd_args);
 		break;

+ 46 - 0
drivers/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h

@@ -240,6 +240,52 @@ struct cam_vfe_bus_ver3_hw_info {
 	uint32_t max_bw_counter_limit;
 };
 
+/**
+ * struct cam_vfe_bus_ver3_wm_mini_dump - VFE WM data
+ *
+ * @width:                 Width
+ * @height:                Height
+ * @stride:                Stride
+ * @h_init:                Horizontal init offset
+ * @acquired_width:        Acquired width
+ * @acquired_height:       Acquired height
+ * @en_cfg:                Enable flag
+ * @format:                Format
+ * @index:                 Index
+ * @state:                 State
+ * @name:                  Res name
+ */
+struct cam_vfe_bus_ver3_wm_mini_dump {
+	uint32_t   width;
+	uint32_t   height;
+	uint32_t   stride;
+	uint32_t   h_init;
+	uint32_t   acquired_width;
+	uint32_t   acquired_height;
+	uint32_t   en_cfg;
+	uint32_t   format;
+	uint32_t   index;
+	uint32_t   state;
+	uint8_t    name[CAM_ISP_RES_NAME_LEN];
+};
+
+/**
+ * struct cam_vfe_bus_ver3_mini_dump_data - VFE bus mini dump data
+ *
+ * @wm:              Write Master client information
+ * @clk_rate:        Clock rate
+ * @num_client:      Num client
+ * @hw_state:        hw state
+ * @hw_idx:          Hw index
+ */
+struct cam_vfe_bus_ver3_mini_dump_data {
+	struct cam_vfe_bus_ver3_wm_mini_dump *wm;
+	uint64_t                              clk_rate;
+	uint32_t                              num_client;
+	uint8_t                               hw_state;
+	uint8_t                               hw_idx;
+};
+
 /*
  * cam_vfe_bus_ver3_init()
  *

+ 25 - 0
drivers/cam_req_mgr/cam_mem_mgr.c

@@ -33,6 +33,29 @@ static void cam_mem_mgr_put_dma_heaps(void);
 static int cam_mem_mgr_get_dma_heaps(void);
 #endif
 
+static unsigned long cam_mem_mgr_mini_dump_cb(void *dst, unsigned long len)
+{
+	struct cam_mem_table_mini_dump      *md;
+
+	if (!dst) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return 0;
+	}
+
+	if (len < sizeof(*md)) {
+		CAM_ERR(CAM_MEM, "Insufficient length %lu", len);
+		return 0;
+	}
+
+	md = (struct cam_mem_table_mini_dump *)dst;
+	memcpy(md->bufq, tbl.bufq, CAM_MEM_BUFQ_MAX * sizeof(struct cam_mem_buf_queue));
+	md->dbg_buf_idx = tbl.dbg_buf_idx;
+	md->alloc_profile_enable = tbl.alloc_profile_enable;
+	md->force_cache_allocs = tbl.force_cache_allocs;
+	md->need_shared_buffer_padding = tbl.need_shared_buffer_padding;
+	return sizeof(*md);
+}
+
 static void cam_mem_mgr_print_tbl(void)
 {
 	int i;
@@ -203,6 +226,8 @@ int cam_mem_mgr_init(void)
 	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);
 
 	cam_mem_mgr_create_debug_fs();
+	cam_common_register_mini_dump_cb(cam_mem_mgr_mini_dump_cb,
+		"cam_mem");
 
 	return 0;
 put_heaps:

+ 19 - 0
drivers/cam_req_mgr/cam_mem_mgr.h

@@ -104,6 +104,25 @@ struct cam_mem_table {
 
 };
 
+/**
+ * struct cam_mem_table_mini_dump
+ *
+ * @bufq: array of buffers
+ * @dbg_buf_idx: debug buffer index to get usecases info
+ * @alloc_profile_enable: Whether to enable alloc profiling
+ * @force_cache_allocs: Force all internal buffer allocations with cache
+ * @need_shared_buffer_padding: Whether padding is needed for shared buffer
+ *                              allocations.
+ */
+struct cam_mem_table_mini_dump {
+	struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+	size_t dbg_buf_idx;
+	bool   alloc_profile_enable;
+	bool   force_cache_allocs;
+	bool   need_shared_buffer_padding;
+};
+
 /**
  * @brief: Allocates and maps buffer
  *

+ 103 - 0
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -5305,6 +5305,106 @@ end:
 	return 0;
 }
 
+static unsigned long cam_req_mgr_core_mini_dump_cb(void *dst,
+	unsigned long len)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_core_mini_dump *md;
+	struct cam_req_mgr_core_link_mini_dump *md_link;
+	struct cam_req_mgr_req_tbl   *l_tbl = NULL;
+	uint8_t *waddr;
+	unsigned long dumped_len = 0;
+	unsigned long remain_len = len;
+	uint32_t i = 0, j = 0;
+
+	if (!dst || len < sizeof(*md)) {
+		CAM_ERR(CAM_CRM, "Insufficient received length %lu, dumped len: %lu",
+			len, dumped_len);
+		return 0;
+	}
+
+	md = (struct cam_req_mgr_core_mini_dump *)dst;
+	md->num_link = 0;
+	waddr = (uint8_t *)dst + sizeof(*md);
+	dumped_len += sizeof(*md);
+	remain_len -= dumped_len;
+
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+		if (remain_len < sizeof(*md_link)) {
+			CAM_ERR(CAM_CRM,
+			"Insufficient received length: %lu, dumped_len: %lu",
+			len, dumped_len);
+			goto end;
+		}
+
+		if (atomic_read(&g_links[i].is_used) == 0)
+			continue;
+
+		md_link = (struct cam_req_mgr_core_link_mini_dump *)waddr;
+		md->link[md->num_link] = md_link;
+		link = &g_links[i];
+		memcpy(&md_link->sof_time, &link->sync_data.sof_time,
+			sizeof(struct cam_req_mgr_sof_time));
+		md_link->sync_link_sof_skip = link->sync_data.sync_link_sof_skip;
+		md_link->num_sync_links = link->sync_data.num_sync_link;
+		md_link->initial_sync_req = link->sync_data.initial_sync_req;
+		md_link->last_flush_id = link->last_flush_id;
+		md_link->is_used = atomic_read(&link->is_used);
+		md_link->retry_cnt = link->retry_cnt;
+		md_link->eof_event_cnt = atomic_read(&link->eof_event_cnt);
+		md_link->pd_mask = link->pd_mask;
+		md_link->link_hdl = link->link_hdl;
+		md_link->num_devs = link->num_devs;
+		md_link->max_delay = link->max_delay;
+		md_link->dual_trigger = link->dual_trigger;
+		md_link->state = link->state;
+		md_link->is_shutdown = link->is_shutdown;
+		md_link->skip_init_frame = link->skip_init_frame;
+		md_link->is_master = link->is_master;
+		md_link->initial_skip = link->initial_skip;
+		md_link->in_msync_mode = link->in_msync_mode;
+		md_link->wq_congestion = link->wq_congestion;
+		memcpy(md_link->req.apply_data, link->req.apply_data,
+			sizeof(link->req.apply_data));
+		memcpy(md_link->req.prev_apply_data, link->req.prev_apply_data,
+			sizeof(link->req.prev_apply_data));
+		memcpy(&md_link->req.in_q, link->req.in_q,
+			sizeof(struct cam_req_mgr_req_queue));
+		md_link->req.num_tbl = link->req.num_tbl;
+
+		md_link->workq.workq_scheduled_ts =
+					    link->workq->workq_scheduled_ts;
+		md_link->workq.task.pending_cnt =
+				atomic_read(&link->workq->task.pending_cnt);
+		md_link->workq.task.free_cnt =
+				atomic_read(&link->workq->task.free_cnt);
+		md_link->workq.task.num_task = link->workq->task.num_task;
+
+		l_tbl = link->req.l_tbl;
+		j = 0;
+		while (l_tbl) {
+			md_link->req.l_tbl[j].id = l_tbl->id;
+			md_link->req.l_tbl[j].pd = l_tbl->pd;
+			md_link->req.l_tbl[j].dev_count = l_tbl->dev_count;
+			md_link->req.l_tbl[j].dev_mask = l_tbl->dev_mask;
+			md_link->req.l_tbl[j].skip_traverse =
+				l_tbl->skip_traverse;
+			md_link->req.l_tbl[j].pd_delta = l_tbl->pd_delta;
+			md_link->req.l_tbl[j].num_slots = l_tbl->num_slots;
+			memcpy(md_link->req.l_tbl[j].slot, l_tbl->slot,
+				sizeof(l_tbl->slot));
+			l_tbl = l_tbl->next;
+			j++;
+		}
+		md->num_link++;
+		dumped_len += sizeof(*md_link);
+		remain_len = len - dumped_len;
+		waddr += sizeof(*md_link);
+	}
+end:
+	return dumped_len;
+}
+
 int cam_req_mgr_core_device_init(void)
 {
 	int i;
@@ -5330,6 +5430,9 @@ int cam_req_mgr_core_device_init(void)
 		atomic_set(&g_links[i].is_used, 0);
 		cam_req_mgr_core_link_reset(&g_links[i]);
 	}
+	cam_common_register_mini_dump_cb(cam_req_mgr_core_mini_dump_cb,
+		"CAM_CRM");
+
 	return 0;
 }
 

+ 107 - 0
drivers/cam_req_mgr/cam_req_mgr_core.h

@@ -8,6 +8,7 @@
 #include <linux/spinlock_types.h>
 #include "cam_req_mgr_interface.h"
 #include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_workq.h"
 #include "cam_req_mgr_timer.h"
 
 #define CAM_REQ_MGR_MAX_LINKED_DEV     16
@@ -530,6 +531,112 @@ struct cam_req_mgr_dump_link_data {
 	struct cam_req_mgr_dev_info        dev_data;
 };
 
+/**
+ * struct cam_req_mgr_req_tbl_mini_dump
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : to indicate how many traverses need to be dropped
+ *                  by this table especially in the beginning or bubble recovery
+ * @pd_delta      : difference between this table's pipeline delay and the next's
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
+ */
+struct cam_req_mgr_req_tbl_mini_dump {
+	int32_t                     id;
+	int32_t                     pd;
+	int32_t                     dev_count;
+	int32_t                     dev_mask;
+	int32_t                     skip_traverse;
+	int32_t                     pd_delta;
+	int32_t                     num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
+};
+
+/**
+ * struct cam_req_mgr_req_data_mini_dump
+ * @apply_data       : Holds information about request id for a request
+ * @prev_apply_data  : Holds information about request id for a previous
+ *                     applied request
+ * @in_q             : Pointer to input request queue
+ * @l_tbl            : unique pd request tables.
+ * @num_tbl          : how many unique pd value devices are present
+ */
+struct cam_req_mgr_req_data_mini_dump {
+	struct cam_req_mgr_apply       apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct cam_req_mgr_apply       prev_apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct cam_req_mgr_req_queue   in_q;
+	struct cam_req_mgr_req_tbl_mini_dump l_tbl[4];
+	int32_t                              num_tbl;
+};
+
+/**
+ * struct cam_req_mgr_core_link_mini_dump
+ * @workq                : Work q information
+ * @req                  : req data holder.
+ * @sof_time             : SOF timestamp information
+ * @initial_sync_req     : The initial req which is required to sync with
+ *                         the other link
+ * @last_flush_id        : Last request to flush
+ * @is_used              : 1 if link is in use else 0
+ * @eof_event_cnt        : Atomic variable to track the number of EOF requests
+ * @pd_mask              : each set bit indicates that a device with pd equal
+ *                         to the bit position is available
+ * @num_sync_links       : num of links sync associated with this link
+ * @open_req_cnt         : Counter to keep track of open requests that are yet
+ *                         to be serviced in the kernel.
+ * @link_hdl             : Link identifier
+ * @num_devs             : num of connected devices to this link
+ * @max_delay            : Max of pipeline delay of all connected devs
+ * @state                : link state machine
+ * @dual_trigger         : Dual trigger flag
+ * @is_shutdown          : Is shutting down
+ * @skip_init_frame      : skip initial frames crm_wd_timer validation in the
+ * @is_master            : Based on pd among links, the link with the highest pd
+ *                         is assigned as master
+ * @initial_skip         : Flag to determine if slave has started streaming in
+ *                         master-slave sync
+ * @in_msync_mode        : M-sync mode flag
+ * @sync_link_sof_skip   : flag determines if a pkt is not available
+ * @wq_congestion        : Indicates if WQ congestion is detected or not
+ */
+struct cam_req_mgr_core_link_mini_dump {
+	struct cam_req_mgr_core_workq_mini_dump  workq;
+	struct cam_req_mgr_req_data_mini_dump    req;
+	struct cam_req_mgr_sof_time              sof_time;
+	int64_t                   initial_sync_req;
+	uint32_t                  last_flush_id;
+	uint32_t                  is_used;
+	uint32_t                  retry_cnt;
+	uint32_t                  eof_event_cnt;
+	int32_t                   pd_mask;
+	int32_t                   num_sync_links;
+	int32_t                   open_req_cnt;
+	int32_t                   link_hdl;
+	int32_t                   num_devs;
+	enum cam_pipeline_delay   max_delay;
+	enum cam_req_mgr_link_state   state;
+	bool                       dual_trigger;
+	bool                       is_shutdown;
+	bool                       skip_init_frame;
+	bool                       is_master;
+	bool                       initial_skip;
+	bool                       in_msync_mode;
+	bool                       sync_link_sof_skip;
+	bool                       wq_congestion;
+};
+
+/**
+ * struct cam_req_mgr_core_mini_dump
+ * @link             : Array of dumped links
+ * @num_link         : Number of links dumped
+ */
+struct cam_req_mgr_core_mini_dump {
+	struct cam_req_mgr_core_link_mini_dump *link[MAXIMUM_LINKS_PER_SESSION];
+	uint32_t num_link;
+};
+
 /**
  * cam_req_mgr_create_session()
  * @brief    : creates session

+ 17 - 2
drivers/cam_req_mgr/cam_req_mgr_workq.h

@@ -14,8 +14,6 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 
-#include "cam_req_mgr_core.h"
-
 /* Threshold for scheduling delay in ms */
 #define CAM_WORKQ_SCHEDULE_TIME_THRESHOLD   5
 
@@ -110,6 +108,23 @@ struct cam_req_mgr_core_workq {
 	} task;
 };
 
+/**
+ * struct cam_req_mgr_core_workq_mini_dump
+ * @workq_scheduled_ts: scheduled ts
+ * task -
+ * @pending_cnt : # of tasks left in queue
+ * @free_cnt    : # of free/available tasks
+ * @num_task    : size of tasks pool
+ */
+struct cam_req_mgr_core_workq_mini_dump {
+	ktime_t                    workq_scheduled_ts;
+	/* tasks */
+	struct {
+		uint32_t               pending_cnt;
+		uint32_t               free_cnt;
+		uint32_t               num_task;
+	} task;
+};
 /**
  * cam_req_mgr_process_workq() - main loop handling
  * @w: workqueue task pointer

+ 169 - 0
drivers/cam_smmu/cam_smmu_api.c

@@ -229,6 +229,53 @@ struct cam_sec_buff_info {
 	size_t len;
 };
 
+struct cam_smmu_mini_dump_cb_info {
+	struct cam_smmu_monitor mapping[CAM_SMMU_MONITOR_MAX_ENTRIES];
+	struct cam_smmu_region_info scratch_info;
+	struct cam_smmu_region_info firmware_info;
+	struct cam_smmu_region_info shared_info;
+	struct cam_smmu_region_info io_info;
+	struct cam_smmu_region_info secheap_info;
+	struct cam_smmu_region_info fwuncached_region;
+	struct cam_smmu_region_info qdss_info;
+	struct region_buf_info secheap_buf;
+	struct region_buf_info fwuncached_reg_buf;
+	char   name[CAM_SMMU_SHARED_HDL_MAX][16];
+	size_t va_len;
+	size_t io_mapping_size;
+	size_t shared_mapping_size;
+	size_t discard_iova_len;
+	int handle;
+	int device_count;
+	int num_shared_hdl;
+	int cb_count;
+	int secure_count;
+	int pf_count;
+	dma_addr_t va_start;
+	dma_addr_t discard_iova_start;
+	dma_addr_t qdss_phy_addr;
+	enum cam_io_coherency_mode coherency_mode;
+	enum cam_smmu_ops_param state;
+	uint8_t scratch_buf_support;
+	uint8_t firmware_support;
+	uint8_t shared_support;
+	uint8_t io_support;
+	uint8_t secheap_support;
+	uint8_t fwuncached_region_support;
+	uint8_t qdss_support;
+	bool is_mul_client;
+	bool is_secure;
+	bool is_fw_allocated;
+	bool is_secheap_allocated;
+	bool is_fwuncached_buf_allocated;
+	bool is_qdss_allocated;
+};
+
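+/*
+ * Note: @cb below is not a separately allocated array;
+ * cam_smmu_mini_dump_cb() points it just past this header inside the
+ * flat minidump buffer, so the per-context-bank entries follow the
+ * header contiguously.
+ */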
+struct cam_smmu_mini_dump_info {
+	uint32_t cb_num;
+	struct   cam_smmu_mini_dump_cb_info *cb;
+};
+
 static const char *qdss_region_name = "qdss";
 
 static struct cam_iommu_cb_set iommu_cb_set;
@@ -4240,6 +4287,125 @@ cb_init_fail:
 	return rc;
 }
 
+static void cam_smmu_mini_dump_entries(
+	struct cam_smmu_mini_dump_cb_info *target,
+	struct cam_context_bank_info *src)
+{
+	int i = 0;
+	int64_t state_head = 0;
+	uint32_t index, num_entries, oldest_entry;
+
+	state_head = atomic64_read(&src->monitor_head);
+
+	if (state_head == -1) {
+		return;
+	} else if (state_head < CAM_SMMU_MONITOR_MAX_ENTRIES) {
+		num_entries = state_head;
+		oldest_entry = 0;
+	} else {
+		num_entries = CAM_SMMU_MONITOR_MAX_ENTRIES;
+		div_u64_rem(state_head + 1,
+			CAM_SMMU_MONITOR_MAX_ENTRIES, &oldest_entry);
+	}
+
+	index = oldest_entry;
+
+	for (i = 0; i < num_entries; i++) {
+		memcpy(&target->mapping[index],
+			&src->monitor_entries[index],
+			sizeof(struct cam_smmu_monitor));
+		index = (index + 1) % CAM_SMMU_MONITOR_MAX_ENTRIES;
+	}
+}
+
+static unsigned long cam_smmu_mini_dump_cb(void *dst, unsigned long len)
+{
+	struct cam_smmu_mini_dump_cb_info *cb_md;
+	struct cam_smmu_mini_dump_info     *md;
+	struct cam_context_bank_info       *cb;
+	unsigned long                       dumped_len = 0;
+	unsigned long                       remain_len = len;
+	uint32_t                            i = 0, j = 0;
+
+	if (!dst || len < sizeof(*md)) {
+		CAM_ERR(CAM_SMMU, "Invalid params dst: %pK len:%lu",
+			dst, len);
+		return 0;
+	}
+
+	md = (struct cam_smmu_mini_dump_info *)dst;
+	md->cb_num = 0;
+	md->cb = (struct cam_smmu_mini_dump_cb_info *)
+		((uint8_t *)dst + sizeof(*md));
+	dumped_len += sizeof(*md);
+	remain_len = len - dumped_len;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (remain_len < sizeof(*cb_md))
+			goto end;
+
+		cb = &iommu_cb_set.cb_info[i];
+		cb_md = &md->cb[i];
+		cb_md->is_mul_client = cb->is_mul_client;
+		cb_md->is_secure = cb->is_secure;
+		cb_md->is_fw_allocated = cb->is_fw_allocated;
+		cb_md->is_secheap_allocated = cb->is_secheap_allocated;
+		cb_md->is_fwuncached_buf_allocated = cb->is_fwuncached_buf_allocated;
+		cb_md->is_qdss_allocated = cb->is_qdss_allocated;
+		cb_md->scratch_buf_support = cb->scratch_buf_support;
+		cb_md->firmware_support = cb->firmware_support;
+		cb_md->shared_support = cb->shared_support;
+		cb_md->io_support = cb->io_support;
+		cb_md->fwuncached_region_support = cb->fwuncached_region_support;
+		cb_md->qdss_support = cb->qdss_support;
+		cb_md->coherency_mode = cb->coherency_mode;
+		cb_md->state = cb->state;
+		cb_md->va_start = cb->va_start;
+		cb_md->discard_iova_start = cb->discard_iova_start;
+		cb_md->qdss_phy_addr = cb->qdss_phy_addr;
+		cb_md->va_len = cb->va_len;
+		cb_md->io_mapping_size = cb->io_mapping_size;
+		cb_md->shared_mapping_size = cb->shared_mapping_size;
+		cb_md->discard_iova_len = cb->discard_iova_len;
+		cb_md->handle = cb->handle;
+		cb_md->device_count = cb->device_count;
+		cb_md->num_shared_hdl = cb->num_shared_hdl;
+		cb_md->secure_count = cb->secure_count;
+		cb_md->cb_count = cb->cb_count;
+		cb_md->pf_count = cb->pf_count;
+		memcpy(&cb_md->scratch_info, &cb->scratch_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->firmware_info, &cb->firmware_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->shared_info, &cb->shared_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->io_info, &cb->io_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->secheap_info, &cb->secheap_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->fwuncached_region, &cb->fwuncached_region,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->qdss_info, &cb->qdss_info,
+			sizeof(struct cam_smmu_region_info));
+		memcpy(&cb_md->secheap_buf, &cb->secheap_buf,
+			sizeof(struct region_buf_info));
+		memcpy(&cb_md->fwuncached_reg_buf, &cb->fwuncached_reg_buf,
+			sizeof(struct region_buf_info));
+
+		for (j = 0; j < iommu_cb_set.cb_info[i].num_shared_hdl; j++)
+			scnprintf(cb_md->name[j], 16, "%s", cb->name[j]);
+
+		cam_smmu_mini_dump_entries(cb_md, cb);
+		dumped_len += sizeof(*cb_md);
+		remain_len = len - dumped_len;
+		md->cb_num++;
+	}
+end:
+	return dumped_len;
+}
+
 static int cam_smmu_create_debug_fs(void)
 {
 	int rc = 0;
@@ -4367,6 +4533,9 @@ static int cam_smmu_component_bind(struct device *dev,
 	iommu_cb_set.is_expanded_memory =
 		of_property_read_bool(dev->of_node, "expanded_memory");
 
+	cam_common_register_mini_dump_cb(cam_smmu_mini_dump_cb,
+		"cam_smmu");
+
 	CAM_DBG(CAM_SMMU, "Main component bound successfully");
 	return 0;
 }

+ 105 - 0
drivers/cam_utils/cam_common_util.c

@@ -13,6 +13,10 @@
 #include <linux/moduleparam.h>
 #include "cam_common_util.h"
 #include "cam_debug_util.h"
+#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP)
+#include <soc/qcom/minidump.h>
+static struct cam_common_mini_dump_dev_info g_minidump_dev_info;
+#endif
 
 static uint timeout_multiplier = 1;
 module_param(timeout_multiplier, uint, 0644);
@@ -145,3 +149,104 @@ void cam_common_util_thread_switch_delay_detect(
 			diff, threshold);
 	}
 }
+
+#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP)
+static void cam_common_mini_dump_handler(void *dst, unsigned long len)
+{
+	int                               i = 0;
+	uint8_t                          *waddr;
+	unsigned long                     bytes_written = 0;
+	unsigned long                     remain_len = len;
+	struct cam_common_mini_dump_data *md;
+
+	if (len < sizeof(*md)) {
+		CAM_WARN(CAM_UTIL, "Insufficient len %lu", len);
+		return;
+	}
+
+	md = (struct cam_common_mini_dump_data *)dst;
+	waddr = (uint8_t *)md + sizeof(*md);
+	remain_len -= sizeof(*md);
+
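+	/*
+	 * Layout of the dump region: one cam_common_mini_dump_data
+	 * header, then each registered device's payload packed back to
+	 * back; waddr[i]/size[i] record where each payload begins and
+	 * how many bytes its callback wrote.
+	 */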
+	for (i = 0; i < CAM_COMMON_MINI_DUMP_DEV_NUM; i++) {
+		if (!g_minidump_dev_info.dump_cb[i])
+			continue;
+
+		strlcpy(md->name[i], g_minidump_dev_info.name[i],
+			sizeof(md->name[i]));
+		md->waddr[i] = (void *)waddr;
+		bytes_written = g_minidump_dev_info.dump_cb[i](
+			(void *)waddr, remain_len);
+		md->size[i] = bytes_written;
+		if (bytes_written >= remain_len) {
+			CAM_WARN(CAM_UTIL, "No more space to dump");
+			goto nomem;
+		}
+
+		remain_len -= bytes_written;
+		waddr += bytes_written;
+	}
+
+	return;
+nomem:
+	for (; i >= 0; i--)
+		CAM_WARN(CAM_UTIL, "%s: Dumped len: %lu", md->name[i], md->size[i]);
+}
+
+static int cam_common_md_notify_handler(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	struct va_md_entry cbentry;
+	int rc = 0;
+
+	cbentry.vaddr = 0x0;
+	strlcpy(cbentry.owner, "Camera", sizeof(cbentry.owner));
+	cbentry.size = CAM_COMMON_MINI_DUMP_SIZE;
+	cbentry.cb = cam_common_mini_dump_handler;
+	rc = qcom_va_md_add_region(&cbentry);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "VA region add failed %d", rc);
+		return NOTIFY_STOP_MASK;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cam_common_md_notify_blk = {
+	.notifier_call = cam_common_md_notify_handler,
+	.priority = INT_MAX,
+};
+
+int cam_common_register_mini_dump_cb(
+	cam_common_mini_dump_cb mini_dump_cb,
+	uint8_t *dev_name)
+{
+	int rc = 0;
+
+	if (g_minidump_dev_info.num_devs >= CAM_COMMON_MINI_DUMP_DEV_NUM) {
+		CAM_ERR(CAM_UTIL, "No free index available");
+		return -EINVAL;
+	}
+
+	if (!mini_dump_cb || !dev_name) {
+		CAM_ERR(CAM_UTIL, "Invalid params");
+		return -EINVAL;
+	}
+
+	g_minidump_dev_info.dump_cb[g_minidump_dev_info.num_devs] =
+		mini_dump_cb;
+	scnprintf(g_minidump_dev_info.name[g_minidump_dev_info.num_devs],
+		CAM_COMMON_MINI_DUMP_DEV_NAME_LEN, "%s", dev_name);
+	g_minidump_dev_info.num_devs++;
+	if (!g_minidump_dev_info.is_registered) {
+		rc = qcom_va_md_register("Camera", &cam_common_md_notify_blk);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Camera VA minidump register failed");
+			goto end;
+		}
+		g_minidump_dev_info.is_registered = true;
+	}
+end:
+	return rc;
+}
+#endif

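For completeness, the registration pattern a client driver follows mirrors the cam_smmu hookup above; a minimal, hypothetical sketch (the cam_foo names are illustrative only):

static unsigned long cam_foo_mini_dump_cb(void *dst, unsigned long len)
{
	struct cam_foo_mini_dump *md;

	if (!dst || len < sizeof(*md))
		return 0;

	md = (struct cam_foo_mini_dump *)dst;
	/* snapshot driver state into *md here */
	return sizeof(*md);   /* bytes written into the region */
}

/* typically called once from the component bind/probe path */
cam_common_register_mini_dump_cb(cam_foo_mini_dump_cb, "cam_foo");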
+ 53 - 0
drivers/cam_utils/cam_common_util.h

@@ -13,6 +13,10 @@
 #define CAM_36BIT_INTF_GET_IOVA_BASE(iova) ((iova) >> 8)
 #define CAM_36BIT_INTF_GET_IOVA_OFFSET(iova) ((iova) & 0xff)
 
+#define CAM_COMMON_MINI_DUMP_DEV_NUM      6
+#define CAM_COMMON_MINI_DUMP_DEV_NAME_LEN 16
+#define CAM_COMMON_MINI_DUMP_SIZE         (10 * 1024 * 1024)
+
 #define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr)
 #define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr)
 
@@ -43,6 +47,34 @@
 	(hrs) = do_div(tmp, 24);                                                 \
 })
 
+typedef unsigned long (*cam_common_mini_dump_cb) (void *dst, unsigned long len);
+
+/**
+ * struct cam_common_mini_dump_dev_info
+ * @dump_cb       : Array of registered mini dump callbacks
+ * @name          : Name of each registered driver
+ * @num_devs      : Number of devices registered
+ * @is_registered : Bool to indicate if registered with the minidump driver
+ */
+struct cam_common_mini_dump_dev_info {
+	cam_common_mini_dump_cb  dump_cb[CAM_COMMON_MINI_DUMP_DEV_NUM];
+	uint8_t                  name[CAM_COMMON_MINI_DUMP_DEV_NUM]
+				    [CAM_COMMON_MINI_DUMP_DEV_NAME_LEN];
+	uint8_t                  num_devs;
+	bool                     is_registered;
+};
+
+/**
+ * struct cam_common_mini_dump_data
+ * @waddr        : Start address of each device's dumped data
+ * @name         : Name of each registered driver
+ * @size         : Number of bytes each device dumped
+ */
+struct cam_common_mini_dump_data {
+	void          *waddr[CAM_COMMON_MINI_DUMP_DEV_NUM];
+	uint8_t        name[CAM_COMMON_MINI_DUMP_DEV_NUM][CAM_COMMON_MINI_DUMP_DEV_NAME_LEN];
+	unsigned long  size[CAM_COMMON_MINI_DUMP_DEV_NUM];
+};
 
 /**
  * cam_common_util_get_string_index()
@@ -138,4 +170,25 @@ int cam_common_modify_timer(struct timer_list *timer, int32_t timeout_val);
 void cam_common_util_thread_switch_delay_detect(const char *token,
 	ktime_t scheduled_time, uint32_t threshold);
 
+/**
+ * cam_common_register_mini_dump_cb()
+ *
+ * @brief                  common interface to register mini dump cb
+ *
+ * @mini_dump_cb:          Pointer to the mini_dump_cb
+ * @name:                  name of device registering
+ *
+ * @return:                0 on successful registration, non-zero on failure
+ */
+#if IS_REACHABLE(CONFIG_QCOM_VA_MINIDUMP)
+int cam_common_register_mini_dump_cb(
+	cam_common_mini_dump_cb mini_dump_cb, uint8_t *name);
+#else
+static inline int cam_common_register_mini_dump_cb(
+	cam_common_mini_dump_cb mini_dump_cb,
+	uint8_t *dev_name)
+{
+	return 0;
+}
+#endif
 #endif /* _CAM_COMMON_UTIL_H_ */