Prechádzať zdrojové kódy

msm: camera: sync: Add monitor information for sync objects

Monitor sync object operations such as create, register callback,
signal, and unregister callback, which is useful for debugging. Introduce
debugfs to dynamically set the monitor mask. Apply dump on error
when CAM_GENERIC_FENCE_TYPE_xx is set in cam_sync_monitor_mask and
apply dump on release when CAM_GENERIC_FENCE_TYPE_xx_DUMP is also set
at the same time. Apply dynamic memory allocation for monitor data
in synx and dma. Refactor APIs into generic ones and add memory
checks.

CRs-Fixed: 3350863
Change-Id: I70cfdc1215be5f6bf564c672a874e6ba7997cdfa
Signed-off-by: Pavan Kumar Chilamkurthi <[email protected]>
Signed-off-by: Haochen Yang <[email protected]>
Pavan Kumar Chilamkurthi 2 rokov pred
rodič
commit
07b3c215a1

+ 189 - 56
drivers/cam_sync/cam_sync.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -28,6 +28,7 @@ struct sync_device *sync_dev;
  * directly in the same context
  */
 static bool trigger_cb_without_switch;
+unsigned long cam_sync_monitor_mask;
 
 static void cam_sync_print_fence_table(void)
 {
@@ -144,8 +145,9 @@ int cam_sync_register_callback(sync_callback cb_func,
 	struct sync_callback_info *sync_cb;
 	struct sync_table_row *row = NULL;
 	int status = 0;
+	int rc = 0;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0) || (!cb_func))
 		return -EINVAL;
 
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
@@ -156,14 +158,14 @@ int cam_sync_register_callback(sync_callback cb_func,
 			"Error: accessing an uninitialized sync obj %s[%d]",
 			row->name,
 			sync_obj);
-		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
 	if (!sync_cb) {
-		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto monitor_dump;
 	}
 
 	/* Trigger callback if sync object is already in SIGNALED state */
@@ -171,6 +173,11 @@ int cam_sync_register_callback(sync_callback cb_func,
 		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
 		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) &&
 		(!row->remaining)) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(sync_obj,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_SKIP_REGISTER_CB);
 		if (trigger_cb_without_switch) {
 			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%s[%d]",
 				row->name,
@@ -203,9 +210,20 @@ int cam_sync_register_callback(sync_callback cb_func,
 	sync_cb->sync_obj = sync_obj;
 	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
 	list_add_tail(&sync_cb->list, &row->callback_list);
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(sync_obj,
+			&sync_dev->table_lock, sync_dev->mon_data,
+			CAM_FENCE_OP_REGISTER_CB);
+
 	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 
 	return 0;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return rc;
 }
 
 int cam_sync_deregister_callback(sync_callback cb_func,
@@ -214,8 +232,9 @@ int cam_sync_deregister_callback(sync_callback cb_func,
 	struct sync_table_row *row = NULL;
 	struct sync_callback_info *sync_cb, *temp;
 	bool found = false;
+	int rc = 0;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
@@ -226,24 +245,43 @@ int cam_sync_deregister_callback(sync_callback cb_func,
 			"Error: accessing an uninitialized sync obj = %s[%d]",
 			row->name,
 			sync_obj);
-		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%s[%d]",
 		row->name,
 		sync_obj);
 	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
-		if (sync_cb->callback_func == cb_func &&
-			sync_cb->cb_data == userdata) {
+		if ((sync_cb->callback_func == cb_func) &&
+			(sync_cb->cb_data == userdata)) {
 			list_del_init(&sync_cb->list);
 			kfree(sync_cb);
 			found = true;
 		}
 	}
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
+		if (found) {
+			cam_generic_fence_update_monitor_array(sync_obj,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_UNREGISTER_CB);
+		} else {
+			CAM_ERR(CAM_SYNC,
+				"Error: Callback not found sync obj = %s[%d] : sync_id %d, state %d",
+				row->name, sync_obj, row->sync_id, row->state);
+			cam_sync_dump_monitor_array(row);
+		}
+	}
+
 	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
 	return found ? 0 : -ENOENT;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return rc;
 }
 
 static inline int cam_sync_signal_dma_fence_util(
@@ -309,6 +347,12 @@ static void cam_sync_signal_parent_util(int32_t status,
 				parent_info->sync_id, parent_row->state,
 				event_cause);
 
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(parent_info->sync_id,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_SIGNAL);
+
 		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
 		list_del_init(&parent_info->list);
 		kfree(parent_info);
@@ -362,22 +406,21 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
 	struct cam_synx_obj_signal signal_synx_obj;
 #endif
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0)) {
 		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
 			sync_obj, CAM_SYNC_MAX_OBJS);
 		return -EINVAL;
 	}
 
-	row = sync_dev->sync_table + sync_obj;
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
 
 	rc = cam_sync_signal_validate_util(sync_obj, status);
 	if (rc) {
-		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		CAM_ERR(CAM_SYNC,
 			"Error: Failed to validate signal info for sync_obj = %s[%d] with status = %d rc = %d",
 			row->name, sync_obj, status, rc);
-		return rc;
+		goto monitor_dump;
 	}
 
 	if (!atomic_dec_and_test(&row->ref_cnt)) {
@@ -409,6 +452,12 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
 	/* copy parent list to local and release child lock */
 	INIT_LIST_HEAD(&parents_list);
 	list_splice_init(&row->parents_list, &parents_list);
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(sync_obj,
+			&sync_dev->table_lock, sync_dev->mon_data,
+			CAM_FENCE_OP_SIGNAL);
+
 	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 
 #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
@@ -432,6 +481,11 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status, uint32_t event_cause)
 	cam_sync_signal_parent_util(status, event_cause, &parents_list);
 
 	return 0;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return rc;
 }
 
 int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
@@ -441,7 +495,7 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
 	bool bit;
 	int i = 0;
 
-	if (!sync_obj || !merged_obj) {
+	if ((!sync_obj) || (!merged_obj)) {
 		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
 		return -EINVAL;
 	}
@@ -480,7 +534,6 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
 		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
 			idx);
 		clear_bit(idx, sync_dev->bitmap);
-		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
 	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
@@ -493,21 +546,21 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
 int cam_sync_get_obj_ref(int32_t sync_obj)
 {
 	struct sync_table_row *row = NULL;
+	int rc;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
-	row = sync_dev->sync_table + sync_obj;
-
 	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state != CAM_SYNC_STATE_ACTIVE) {
-		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
 		CAM_ERR(CAM_SYNC,
 			"Error: accessing an uninitialized sync obj = %s[%d]",
 			row->name,
 			sync_obj);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	atomic_inc(&row->ref_cnt);
@@ -515,13 +568,18 @@ int cam_sync_get_obj_ref(int32_t sync_obj)
 	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
 
 	return 0;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
+	return rc;
 }
 
 int cam_sync_put_obj_ref(int32_t sync_obj)
 {
 	struct sync_table_row *row = NULL;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
 	row = sync_dev->sync_table + sync_obj;
@@ -539,17 +597,20 @@ int cam_sync_destroy(int32_t sync_obj)
 int cam_sync_check_valid(int32_t sync_obj)
 {
 	struct sync_table_row *row = NULL;
+	int rc;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	row = sync_dev->sync_table + sync_obj;
 
 	if (!test_bit(sync_obj, sync_dev->bitmap)) {
 		CAM_ERR(CAM_SYNC, "Error: Released sync obj received %s[%d]",
 			row->name,
 			sync_obj);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
@@ -557,18 +618,25 @@ int cam_sync_check_valid(int32_t sync_obj)
 			"Error: accessing an uninitialized sync obj = %s[%d]",
 			row->name,
 			sync_obj);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	return 0;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return rc;
 }
 
 int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
 {
 	unsigned long timeleft;
-	int rc = -EINVAL;
+	int rc;
 	struct sync_table_row *row = NULL;
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
 	row = sync_dev->sync_table + sync_obj;
@@ -578,7 +646,8 @@ int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
 			"Error: accessing an uninitialized sync obj = %s[%d]",
 			row->name,
 			sync_obj);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	timeleft = cam_common_wait_for_completion_timeout(&row->signaled,
@@ -588,6 +657,7 @@ int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
 		CAM_ERR(CAM_SYNC,
 			"Error: timed out for sync obj = %s[%d]", row->name, sync_obj);
 		rc = -ETIMEDOUT;
+		goto monitor_dump;
 	} else {
 		switch (row->state) {
 		case CAM_SYNC_STATE_INVALID:
@@ -598,17 +668,21 @@ int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
 				"Error: Wait on invalid state = %d, obj = %d, name = %s",
 				row->state, sync_obj, row->name);
 			rc = -EINVAL;
-			break;
+			goto monitor_dump;
 		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
 			rc = 0;
 			break;
 		default:
 			rc = -EINVAL;
-			break;
+			goto monitor_dump;
 		}
 	}
 
 	return rc;
+
+monitor_dump:
+	cam_sync_dump_monitor_array(row);
+	return rc;
 }
 
 static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
@@ -643,7 +717,7 @@ static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
 
 static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
 {
-	int rc = 0;
+	int rc;
 	struct cam_sync_signal sync_signal;
 
 	if (k_ioctl->size != sizeof(struct cam_sync_signal))
@@ -787,7 +861,7 @@ static int cam_sync_handle_register_user_payload(
 		return -EFAULT;
 
 	sync_obj = userpayload_info.sync_obj;
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
 	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
@@ -814,6 +888,11 @@ static int cam_sync_handle_register_user_payload(
 	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS) ||
 		(row->state == CAM_SYNC_STATE_SIGNALED_ERROR) ||
 		(row->state == CAM_SYNC_STATE_SIGNALED_CANCEL)) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(sync_obj,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_SKIP_REGISTER_CB);
 
 		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
 			sync_obj,
@@ -836,6 +915,12 @@ static int cam_sync_handle_register_user_payload(
 			user_payload_iter->payload_data[1] ==
 				user_payload_kernel->payload_data[1]) {
 
+			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+				&cam_sync_monitor_mask))
+				cam_generic_fence_update_monitor_array(sync_obj,
+					&sync_dev->table_lock, sync_dev->mon_data,
+					CAM_FENCE_OP_ALREADY_REGISTERED_CB);
+
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 			kfree(user_payload_kernel);
 			return -EALREADY;
@@ -843,6 +928,12 @@ static int cam_sync_handle_register_user_payload(
 	}
 
 	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(sync_obj,
+			&sync_dev->table_lock, sync_dev->mon_data,
+			CAM_FENCE_OP_REGISTER_CB);
+
 	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	return 0;
 }
@@ -871,7 +962,7 @@ static int cam_sync_handle_deregister_user_payload(
 		return -EFAULT;
 
 	sync_obj = userpayload_info.sync_obj;
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if ((sync_obj >= CAM_SYNC_MAX_OBJS) || (sync_obj <= 0))
 		return -EINVAL;
 
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
@@ -894,6 +985,12 @@ static int cam_sync_handle_deregister_user_payload(
 				userpayload_info.payload[1]) {
 			list_del_init(&user_payload_kernel->list);
 			kfree(user_payload_kernel);
+
+			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+				&cam_sync_monitor_mask))
+				cam_generic_fence_update_monitor_array(sync_obj,
+					&sync_dev->table_lock, sync_dev->mon_data,
+					CAM_FENCE_OP_UNREGISTER_CB);
 		}
 	}
 
@@ -905,7 +1002,7 @@ static int cam_sync_dma_fence_cb(
 	int32_t sync_obj,
 	struct cam_dma_fence_signal_sync_obj *signal_sync_obj)
 {
-	int32_t rc = 0;
+	int32_t rc;
 	int32_t status = CAM_SYNC_STATE_SIGNALED_SUCCESS;
 	struct sync_table_row *row = NULL;
 	struct list_head parents_list;
@@ -916,7 +1013,7 @@ static int cam_sync_dma_fence_cb(
 	}
 
 	/* Validate sync object range */
-	if (!(sync_obj > 0 && sync_obj < CAM_SYNC_MAX_OBJS)) {
+	if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
 		CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
 		return -EINVAL;
 	}
@@ -987,7 +1084,7 @@ end:
 static int cam_sync_synx_obj_cb(int32_t sync_obj,
 	struct cam_synx_obj_signal_sync_obj *signal_sync_obj)
 {
-	int32_t rc = 0;
+	int32_t rc;
 	struct sync_table_row *row = NULL;
 	struct list_head parents_list;
 
@@ -997,7 +1094,7 @@ static int cam_sync_synx_obj_cb(int32_t sync_obj,
 	}
 
 	/* Validate sync object range */
-	if (!(sync_obj > 0 && sync_obj < CAM_SYNC_MAX_OBJS)) {
+	if (!((sync_obj > 0) && (sync_obj < CAM_SYNC_MAX_OBJS))) {
 		CAM_ERR(CAM_SYNC, "Invalid sync obj: %d", sync_obj);
 		return -EINVAL;
 	}
@@ -1130,7 +1227,7 @@ static void cam_generic_fence_free_input_info_util(
 static int cam_generic_fence_handle_dma_create(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int rc = 0, i, dma_fence_row_idx;
+	int rc, i, dma_fence_row_idx;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
 	struct cam_generic_fence_config *fence_cfg = NULL;
 
@@ -1180,7 +1277,7 @@ out_copy:
 static int cam_generic_fence_handle_dma_release(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int rc = 0, i;
+	int rc, i;
 	bool failed = false;
 	struct cam_dma_fence_release_params release_params;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
@@ -1238,7 +1335,7 @@ static int cam_generic_fence_handle_dma_release(
 static int cam_generic_fence_handle_dma_import(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int32_t rc = 0, i, dma_fence_row_idx;
+	int32_t rc, i, dma_fence_row_idx;
 	struct dma_fence *fence = NULL;
 	struct cam_dma_fence_create_sync_obj_payload dma_sync_create;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
@@ -1508,7 +1605,7 @@ static int cam_generic_fence_config_parse_params(
 static int cam_generic_fence_handle_synx_create(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int rc = 0, i;
+	int rc, i;
 	int32_t row_idx, fence_flag;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
 	struct cam_generic_fence_config *fence_cfg = NULL;
@@ -1563,7 +1660,7 @@ out_copy:
 static int cam_generic_fence_handle_synx_release(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int rc = 0, i;
+	int rc, i;
 	bool failed = false;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
 	struct cam_generic_fence_config *fence_cfg = NULL;
@@ -1621,7 +1718,7 @@ static int cam_generic_fence_handle_synx_release(
 static int cam_sync_synx_associate_obj(int32_t sync_obj, uint32_t synx_obj,
 	int32_t synx_obj_row_idx, bool *is_sync_obj_signaled)
 {
-	int rc = 0;
+	int rc;
 	struct sync_table_row *row = NULL;
 	struct cam_synx_obj_signal signal_synx_obj;
 
@@ -1656,7 +1753,7 @@ signal_synx:
 static int cam_generic_fence_handle_synx_import(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int32_t rc = 0, i, synx_obj_row_idx;
+	int32_t rc, i, synx_obj_row_idx;
 	struct sync_synx_obj_info synx_sync_create;
 	struct cam_generic_fence_input_info *fence_input_info = NULL;
 	struct cam_generic_fence_config *fence_cfg = NULL;
@@ -1749,7 +1846,7 @@ out_copy:
 static int cam_generic_fence_handle_synx_signal(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int32_t rc = 0, i;
+	int32_t rc, i;
 	struct cam_generic_fence_signal_info *fence_signal_info;
 	struct cam_synx_obj_signal *synx_signal_info;
 
@@ -1832,7 +1929,7 @@ static int cam_generic_fence_process_synx_obj_cmd(
 static int cam_generic_fence_handle_sync_create(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
-	int rc = 0, i, dma_fence_row_idx;
+	int rc, i, dma_fence_row_idx;
 	bool dma_fence_created;
 	unsigned long fence_sel_mask;
 	struct cam_dma_fence_release_params release_params;
@@ -2045,7 +2142,7 @@ static int cam_generic_fence_handle_sync_release(
 	struct cam_generic_fence_cmd_args *fence_cmd_args)
 {
 	bool failed = false;
-	int rc = 0, i;
+	int rc, i;
 	unsigned long fence_sel_mask;
 	struct cam_sync_check_for_dma_release check_for_dma_release;
 	struct cam_dma_fence_release_params release_params;
@@ -2329,6 +2426,9 @@ static int cam_sync_open(struct file *filep)
 	rc = v4l2_fh_open(filep);
 	if (!rc) {
 		sync_dev->open_cnt++;
+#if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
+		cam_synx_obj_open();
+#endif
 		cam_dma_fence_open();
 		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
 		sync_dev->cam_sync_eventq = filep->private_data;
@@ -2336,6 +2436,18 @@ static int cam_sync_open(struct file *filep)
 	} else {
 		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
 	}
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
+		sync_dev->mon_data = kzalloc(
+			sizeof(struct cam_generic_fence_monitor_data *) *
+			CAM_SYNC_MONITOR_TABLE_SIZE, GFP_KERNEL);
+		if (!sync_dev->mon_data) {
+			CAM_WARN(CAM_SYNC, "Failed to allocate memory %d",
+				sizeof(struct cam_generic_fence_monitor_data *) *
+				CAM_SYNC_MONITOR_TABLE_SIZE);
+		}
+	}
+
 	mutex_unlock(&sync_dev->table_lock);
 
 	return rc;
@@ -2352,6 +2464,7 @@ static int cam_sync_close(struct file *filep)
 		rc = -ENODEV;
 		return rc;
 	}
+
 	mutex_lock(&sync_dev->table_lock);
 	sync_dev->open_cnt--;
 	if (!sync_dev->open_cnt) {
@@ -2395,6 +2508,12 @@ static int cam_sync_close(struct file *filep)
 					  "Cleanup destroy fail:idx:%d\n", i);
 			}
 		}
+
+		if (sync_dev->mon_data) {
+			for (i = 0; i < CAM_SYNC_MONITOR_TABLE_SIZE; i++)
+				kfree(sync_dev->mon_data[i]);
+		}
+		kfree(sync_dev->mon_data);
 	}
 
 	/* Clean dma fence table */
@@ -2549,9 +2668,8 @@ static void cam_sync_init_entity(struct sync_device *sync_dev)
 
 static int cam_sync_create_debugfs(void)
 {
-	int rc = 0;
+	int rc;
 	struct dentry *dbgfileptr = NULL;
-
 	if (!cam_debugfs_available())
 		return 0;
 
@@ -2567,6 +2685,8 @@ static int cam_sync_create_debugfs(void)
 	debugfs_create_bool("trigger_cb_without_switch", 0644,
 		sync_dev->dentry, &trigger_cb_without_switch);
 
+	debugfs_create_ulong("cam_sync_monitor_mask", 0644,
+		sync_dev->dentry, &cam_sync_monitor_mask);
 end:
 	return rc;
 }
@@ -2574,7 +2694,7 @@ end:
 #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
 int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
 {
-	int rc = 0;
+	int rc;
 	uint32_t sync_status = synx_status;
 
 	switch (synx_status) {
@@ -2607,10 +2727,22 @@ int cam_synx_sync_signal(int32_t sync_obj, uint32_t synx_status)
 	return rc;
 }
 
+int cam_synx_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	return cam_sync_register_callback(cb_func, userdata, sync_obj);
+}
+
+int cam_synx_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	return cam_sync_deregister_callback(cb_func, userdata, sync_obj);
+}
+
 static int cam_sync_register_synx_bind_ops(
 	struct synx_register_params *object)
 {
-	int rc = 0;
+	int rc;
 
 	rc = synx_register_ops(object);
 	if (rc)
@@ -2622,7 +2754,7 @@ static int cam_sync_register_synx_bind_ops(
 static void cam_sync_unregister_synx_bind_ops(
 	struct synx_register_params *object)
 {
-	int rc = 0;
+	int rc;
 
 	rc = synx_deregister_ops(object);
 	if (rc)
@@ -2635,8 +2767,8 @@ static void cam_sync_configure_synx_obj(struct synx_register_params *object)
 
 	params->name = CAM_SYNC_NAME;
 	params->type = SYNX_TYPE_CSL;
-	params->ops.register_callback = cam_sync_register_callback;
-	params->ops.deregister_callback = cam_sync_deregister_callback;
+	params->ops.register_callback = cam_synx_sync_register_callback;
+	params->ops.deregister_callback = cam_synx_sync_deregister_callback;
 	params->ops.enable_signaling = cam_sync_get_obj_ref;
 	params->ops.signal = cam_synx_sync_signal;
 }
@@ -2720,6 +2852,7 @@ static int cam_sync_component_bind(struct device *dev,
 	}
 
 	trigger_cb_without_switch = false;
+	cam_sync_monitor_mask = 0;
 	cam_sync_create_debugfs();
 #if IS_ENABLED(CONFIG_TARGET_SYNX_ENABLE)
 	/* Initialize synx obj driver */
@@ -2789,7 +2922,7 @@ const static struct component_ops cam_sync_component_ops = {
 
 static int cam_sync_probe(struct platform_device *pdev)
 {
-	int rc = 0;
+	int rc;
 
 	CAM_DBG(CAM_SYNC, "Adding Sync component");
 	rc = component_add(&pdev->dev, &cam_sync_component_ops);

+ 213 - 16
drivers/cam_sync/cam_sync_dma_fence.c

@@ -2,7 +2,11 @@
 /*
  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
+
 #include "cam_sync_dma_fence.h"
+#include "cam_sync_util.h"
+
+extern unsigned long cam_sync_monitor_mask;
 
 /**
  * struct cam_dma_fence_row - DMA fence row
@@ -29,6 +33,7 @@ struct cam_dma_fence_device {
 	spinlock_t row_spinlocks[CAM_DMA_FENCE_MAX_FENCES];
 	struct mutex dev_lock;
 	DECLARE_BITMAP(bitmap, CAM_DMA_FENCE_MAX_FENCES);
+	struct cam_generic_fence_monitor_data **monitor_data;
 };
 
 static atomic64_t g_cam_dma_fence_seq_no;
@@ -61,6 +66,30 @@ static struct dma_fence_ops cam_sync_dma_fence_ops = {
 	.release = __cam_dma_fence_free,
 };
 
+static inline struct cam_generic_fence_monitor_entry *
+	__cam_dma_fence_get_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *monitor_data;
+
+	monitor_data = CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data, idx);
+	if (monitor_data->swap_monitor_entries)
+		return monitor_data->prev_monitor_entries;
+	else
+		return monitor_data->monitor_entries;
+}
+
+static inline struct cam_generic_fence_monitor_entry *
+	__cam_dma_fence_get_prev_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *monitor_data;
+
+	monitor_data = CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data, idx);
+	if (monitor_data->swap_monitor_entries)
+		return monitor_data->monitor_entries;
+	else
+		return monitor_data->prev_monitor_entries;
+}
+
 static void __cam_dma_fence_print_table(void)
 {
 	int i;
@@ -131,12 +160,17 @@ static void __cam_dma_fence_init_row(const char *name,
 
 	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
 	row = &g_cam_dma_fence_dev->rows[idx];
-	memset(row, 0, sizeof(*row));
 	row->fence = dma_fence;
 	row->fd = fd;
 	row->state = CAM_DMA_FENCE_STATE_ACTIVE;
 	row->ext_dma_fence = ext_dma_fence;
 	strscpy(row->name, name, CAM_DMA_FENCE_NAME_LEN);
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) {
+		cam_generic_fence_update_monitor_array(idx,
+			&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+			CAM_FENCE_OP_CREATE);
+	}
 	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
 }
 
@@ -146,6 +180,7 @@ void __cam_dma_fence_signal_cb(
 	struct cam_dma_fence_signal_sync_obj signal_sync_obj;
 	struct cam_dma_fence_row *dma_fence_row =
 		container_of(cb, struct cam_dma_fence_row, fence_cb);
+	uint32_t idx;
 
 	if (dma_fence_row->state == CAM_DMA_FENCE_STATE_INVALID) {
 		CAM_ERR(CAM_DMA_FENCE, "dma fence seqno: %llu is in invalid state: %d",
@@ -169,14 +204,55 @@ void __cam_dma_fence_signal_cb(
 		signal_sync_obj.status = dma_fence_get_status_locked(fence);
 		dma_fence_row->state = CAM_DMA_FENCE_STATE_SIGNALED;
 		dma_fence_row->sync_cb(dma_fence_row->sync_obj, &signal_sync_obj);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+			&cam_sync_monitor_mask)) {
+			__cam_dma_fence_find_fence_in_table(dma_fence_row->fd, &idx);
+			cam_generic_fence_update_monitor_array(idx,
+				&g_cam_dma_fence_dev->dev_lock,
+				g_cam_dma_fence_dev->monitor_data,
+				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
+		}
 	}
 }
 
+static void __cam_dma_fence_dump_monitor_array(int dma_row_idx)
+{
+	struct dma_fence *fence;
+	struct cam_generic_fence_monitor_obj_info obj_info;
+	struct cam_dma_fence_row *row;
+
+	if (!g_cam_dma_fence_dev->monitor_data ||
+		!test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask))
+		return;
+
+	if (!CAM_GENERIC_MONITOR_GET_DATA(g_cam_dma_fence_dev->monitor_data,
+		dma_row_idx)->prev_obj_id)
+		return;
+
+	row = &g_cam_dma_fence_dev->rows[dma_row_idx];
+	fence = row->fence;
+
+	obj_info.name = row->name;
+	obj_info.obj_id = row->fd;
+	obj_info.state = row->state;
+	obj_info.ref_cnt = kref_read(&fence->refcount);
+	obj_info.monitor_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_dma_fence_dev->monitor_data, dma_row_idx);
+	obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_DMA_FENCE;
+	obj_info.sync_id = row->sync_obj;
+	obj_info.monitor_entries =
+		__cam_dma_fence_get_monitor_entries(dma_row_idx);
+	obj_info.prev_monitor_entries =
+		__cam_dma_fence_get_prev_monitor_entries(dma_row_idx);
+	cam_generic_fence_dump_monitor_array(&obj_info);
+}
+
 int cam_dma_fence_get_put_ref(
 	bool get_or_put, int32_t dma_fence_row_idx)
 {
 	struct dma_fence *dma_fence;
 	struct cam_dma_fence_row *row;
+	int rc = 0;
 
 	if ((dma_fence_row_idx < 0) ||
 		(dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) {
@@ -192,8 +268,8 @@ int cam_dma_fence_get_put_ref(
 		CAM_ERR(CAM_DMA_FENCE,
 			"dma fence at idx: %d is in invalid state: %d",
 			dma_fence_row_idx, row->state);
-		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	dma_fence = row->fence;
@@ -209,7 +285,12 @@ int cam_dma_fence_get_put_ref(
 		kref_read(&dma_fence->refcount), (get_or_put ? "getref" : "putref"),
 		dma_fence->seqno);
 
-	return 0;
+	return rc;
+
+monitor_dump:
+	__cam_dma_fence_dump_monitor_array(dma_fence_row_idx);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	return rc;
 }
 
 static struct dma_fence *cam_dma_fence_get_fence_from_sync_file(
@@ -292,11 +373,17 @@ int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx,
 	dma_fence = row->fence;
 
 	if (row->state != CAM_DMA_FENCE_STATE_ACTIVE) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(dma_fence_row_idx,
+				&g_cam_dma_fence_dev->dev_lock,
+				g_cam_dma_fence_dev->monitor_data,
+				CAM_FENCE_OP_SKIP_REGISTER_CB);
 		CAM_ERR(CAM_DMA_FENCE,
 			"dma fence at idx: %d fd: %d seqno: %llu is not active, current state: %d",
 			dma_fence_row_idx, row->fd, dma_fence->seqno, row->state);
 		rc = -EINVAL;
-		goto end;
+		goto monitor_dump;
 	}
 
 	/**
@@ -305,6 +392,12 @@ int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx,
 	 * possible that same fd is returned to a new fence.
 	 */
 	if (row->cb_registered_for_sync) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(dma_fence_row_idx,
+				&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+				CAM_FENCE_OP_ALREADY_REGISTERED_CB);
+
 		CAM_WARN(CAM_DMA_FENCE,
 			"dma fence at idx: %d fd: %d seqno: %llu has already registered a cb for sync: %d - same fd for 2 fences?",
 			dma_fence_row_idx, row->fd, dma_fence->seqno, row->sync_obj);
@@ -317,9 +410,14 @@ int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx,
 		CAM_ERR(CAM_DMA_FENCE,
 			"Failed to register cb for dma fence fd: %d seqno: %llu rc: %d",
 			row->fd, dma_fence->seqno, rc);
-		goto end;
+		goto monitor_dump;
 	}
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(dma_fence_row_idx,
+			&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+			CAM_FENCE_OP_REGISTER_CB);
+
 	row->cb_registered_for_sync = true;
 	row->sync_obj = *sync_obj;
 	row->sync_cb = sync_cb;
@@ -327,6 +425,11 @@ int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx,
 	CAM_DBG(CAM_DMA_FENCE,
 		"CB successfully registered for dma fence fd: %d seqno: %llu for sync_obj: %d",
 		row->fd, dma_fence->seqno, *sync_obj);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	return rc;
+
+monitor_dump:
+	__cam_dma_fence_dump_monitor_array(dma_fence_row_idx);
 
 end:
 	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
@@ -357,7 +460,7 @@ int cam_dma_fence_internal_signal(
 	int32_t dma_fence_row_idx,
 	struct cam_dma_fence_signal *signal_dma_fence)
 {
-	int rc = 0;
+	int rc;
 	struct dma_fence *dma_fence = NULL;
 	struct cam_dma_fence_row *row = NULL;
 
@@ -376,10 +479,10 @@ int cam_dma_fence_internal_signal(
 	dma_fence = row->fence;
 
 	if (IS_ERR_OR_NULL(dma_fence)) {
-		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
 		CAM_ERR(CAM_DMA_FENCE, "DMA fence in row: %d is invalid",
 			dma_fence_row_idx);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	if (row->state == CAM_DMA_FENCE_STATE_SIGNALED) {
@@ -390,6 +493,11 @@ int cam_dma_fence_internal_signal(
 		return 0;
 	}
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(dma_fence_row_idx,
+			&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+			CAM_FENCE_OP_SIGNAL);
+
 	rc = __cam_dma_fence_signal_fence(dma_fence, signal_dma_fence->status);
 	if (rc)
 		CAM_WARN(CAM_DMA_FENCE,
@@ -405,11 +513,16 @@ int cam_dma_fence_internal_signal(
 		signal_dma_fence->status, rc);
 
 	return rc;
+
+monitor_dump:
+	__cam_dma_fence_dump_monitor_array(dma_fence_row_idx);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	return rc;
 }
 
 int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence)
 {
-	int rc = 0;
+	int rc;
 	uint32_t idx;
 	struct dma_fence *dma_fence = NULL;
 	struct cam_dma_fence_row *row = NULL;
@@ -430,11 +543,11 @@ int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence)
 	 * between signal and release
 	 */
 	if (row->state == CAM_DMA_FENCE_STATE_INVALID) {
-		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
 		CAM_ERR(CAM_DMA_FENCE,
 			"dma fence fd: %d is invalid row_idx: %u, failed to signal",
 			signal_dma_fence->dma_fence_fd, idx);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	if (row->state == CAM_DMA_FENCE_STATE_SIGNALED) {
@@ -445,6 +558,11 @@ int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence)
 		return 0;
 	}
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(idx,
+			&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+			CAM_FENCE_OP_SIGNAL);
+
 	rc = __cam_dma_fence_signal_fence(dma_fence, signal_dma_fence->status);
 	if (rc)
 		CAM_WARN(CAM_DMA_FENCE,
@@ -460,6 +578,11 @@ int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence)
 		signal_dma_fence->status, rc);
 
 	return rc;
+
+monitor_dump:
+	__cam_dma_fence_dump_monitor_array(idx);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+	return rc;
 }
 
 static int __cam_dma_fence_get_fd(int32_t *row_idx,
@@ -542,26 +665,53 @@ end:
 	return rc;
 }
 
+static void __cam_dma_fence_save_previous_monitor_data(int dma_row_idx)
+{
+	struct cam_generic_fence_monitor_data *row_mon_data;
+	struct cam_dma_fence_row *row;
+
+	if (!g_cam_dma_fence_dev->monitor_data)
+		return;
+
+	row = &g_cam_dma_fence_dev->rows[dma_row_idx];
+	row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_dma_fence_dev->monitor_data, dma_row_idx);
+
+	/* save current usage details into prev variables */
+	strscpy(row_mon_data->prev_name, row->name, CAM_DMA_FENCE_NAME_LEN);
+	row_mon_data->prev_obj_id          = row->fd;
+	row_mon_data->prev_sync_id         = row->sync_obj;
+	row_mon_data->prev_state           = row->state;
+	row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries;
+	row_mon_data->prev_monitor_head    = atomic64_read(&row_mon_data->monitor_head);
+}
+
 static int __cam_dma_fence_release(int32_t dma_row_idx)
 {
 	struct dma_fence *dma_fence = NULL;
 	struct cam_dma_fence_row *row = NULL;
+	int rc;
 
 	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
 	row = &g_cam_dma_fence_dev->rows[dma_row_idx];
 	dma_fence = row->fence;
 
 	if (row->state == CAM_DMA_FENCE_STATE_INVALID) {
-		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
 		CAM_ERR(CAM_DMA_FENCE, "Invalid row index: %u, state: %u",
 			dma_row_idx, row->state);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto monitor_dump;
 	}
 
 	if (row->state == CAM_DMA_FENCE_STATE_ACTIVE) {
 		CAM_WARN(CAM_DMA_FENCE,
 			"Unsignaled fence being released name: %s seqno: %llu fd:%d",
 			row->name, dma_fence->seqno, row->fd);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(dma_row_idx,
+				&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+				CAM_FENCE_OP_SIGNAL);
 		__cam_dma_fence_signal_fence(dma_fence, -ECANCELED);
 	}
 
@@ -569,6 +719,16 @@ static int __cam_dma_fence_release(int32_t dma_row_idx)
 		"Releasing dma fence with fd: %d[%s] row_idx: %u current ref_cnt: %u",
 		row->fd, row->name, dma_row_idx, kref_read(&dma_fence->refcount));
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) {
+		/* Update monitor entries & save data before row memset to 0 */
+		cam_generic_fence_update_monitor_array(dma_row_idx,
+			&g_cam_dma_fence_dev->dev_lock, g_cam_dma_fence_dev->monitor_data,
+			CAM_FENCE_OP_DESTROY);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE_DUMP, &cam_sync_monitor_mask))
+			__cam_dma_fence_dump_monitor_array(dma_row_idx);
+		__cam_dma_fence_save_previous_monitor_data(dma_row_idx);
+	}
+
 	/* putref on dma fence */
 	dma_fence_put(dma_fence);
 
@@ -576,7 +736,13 @@ static int __cam_dma_fence_release(int32_t dma_row_idx)
 	memset(row, 0, sizeof(struct cam_dma_fence_row));
 	clear_bit(dma_row_idx, g_cam_dma_fence_dev->bitmap);
 	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
+
 	return 0;
+
+monitor_dump:
+	__cam_dma_fence_dump_monitor_array(dma_row_idx);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
+	return rc;
 }
 
 static int __cam_dma_fence_release_fd(int fd)
@@ -632,13 +798,27 @@ void cam_dma_fence_close(void)
 				kref_read(&row->fence->refcount));
 
 			/* If registered for cb, remove cb */
-			if (row->cb_registered_for_sync)
+			if (row->cb_registered_for_sync) {
+				if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+					&cam_sync_monitor_mask))
+					cam_generic_fence_update_monitor_array(i,
+						&g_cam_dma_fence_dev->dev_lock,
+						g_cam_dma_fence_dev->monitor_data,
+						CAM_FENCE_OP_UNREGISTER_CB);
 				dma_fence_remove_callback(row->fence, &row->fence_cb);
+			}
 
 			/* Signal and put if the dma fence is created from camera */
 			if (!row->ext_dma_fence) {
-				if (row->state != CAM_DMA_FENCE_STATE_SIGNALED)
+				if (row->state != CAM_DMA_FENCE_STATE_SIGNALED) {
+					if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE,
+						&cam_sync_monitor_mask))
+						cam_generic_fence_update_monitor_array(i,
+							&g_cam_dma_fence_dev->dev_lock,
+							g_cam_dma_fence_dev->monitor_data,
+							CAM_FENCE_OP_SIGNAL);
 					__cam_dma_fence_signal_fence(row->fence, -EADV);
+				}
 				dma_fence_put(row->fence);
 			}
 
@@ -648,6 +828,12 @@ void cam_dma_fence_close(void)
 		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
 	}
 
+	if (g_cam_dma_fence_dev->monitor_data) {
+		for (i = 0; i < CAM_DMA_FENCE_TABLE_SZ; i++)
+			kfree(g_cam_dma_fence_dev->monitor_data[i]);
+	}
+	kfree(g_cam_dma_fence_dev->monitor_data);
+
 	mutex_unlock(&g_cam_dma_fence_dev->dev_lock);
 	CAM_DBG(CAM_DMA_FENCE, "Close on Camera DMA fence driver");
 }
@@ -656,6 +842,17 @@ void cam_dma_fence_open(void)
 {
 	mutex_lock(&g_cam_dma_fence_dev->dev_lock);
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &cam_sync_monitor_mask)) {
+		g_cam_dma_fence_dev->monitor_data = kzalloc(
+			sizeof(struct cam_generic_fence_monitor_data *) *
+			CAM_DMA_FENCE_TABLE_SZ, GFP_KERNEL);
+		if (!g_cam_dma_fence_dev->monitor_data) {
+			CAM_WARN(CAM_DMA_FENCE, "Failed to allocate memory %zu",
+				sizeof(struct cam_generic_fence_monitor_data *) *
+				CAM_DMA_FENCE_TABLE_SZ);
+		}
+	}
+
 	/* DMA fence seqno reset */
 	atomic64_set(&g_cam_dma_fence_seq_no, 0);
 	mutex_unlock(&g_cam_dma_fence_dev->dev_lock);

+ 2 - 1
drivers/cam_sync/cam_sync_dma_fence.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only
  *
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef __CAM_SYNC_DMA_FENCE_H__
 #define __CAM_SYNC_DMA_FENCE_H__
@@ -16,6 +16,7 @@
 
 #define CAM_DMA_FENCE_MAX_FENCES  128
 #define CAM_DMA_FENCE_NAME_LEN    128
+#define CAM_DMA_FENCE_TABLE_SZ (CAM_DMA_FENCE_MAX_FENCES / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)
 
 /* DMA fence state */
 enum cam_dma_fence_state {

+ 131 - 1
drivers/cam_sync/cam_sync_private.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __CAM_SYNC_PRIVATE_H__
@@ -48,6 +48,35 @@
 #define CAM_SYNC_TYPE_INDV              0
 #define CAM_SYNC_TYPE_GROUP             1
 
+/* Number of monitor table elements */
+#define CAM_SYNC_MONITOR_TABLE_SIZE     16
+/* Number of monitored objects per table entry */
+#define CAM_SYNC_MONITOR_TABLE_ENTRY_SZ (CAM_SYNC_MAX_OBJS / CAM_SYNC_MONITOR_TABLE_SIZE)
+#define CAM_SYNC_MONITOR_MAX_ENTRIES    30
+#define CAM_SYNC_INC_MONITOR_HEAD(head, ret) \
+	div_u64_rem(atomic64_add_return(1, (head)),\
+	CAM_SYNC_MONITOR_MAX_ENTRIES, (ret))
+#define CAM_SYNC_MONITOR_GET_DATA(idx)  \
+	(sync_dev->mon_data[(idx) / CAM_SYNC_MONITOR_TABLE_ENTRY_SZ] + \
+	((idx) % CAM_SYNC_MONITOR_TABLE_ENTRY_SZ))
+#define CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ CAM_SYNC_MONITOR_TABLE_ENTRY_SZ
+#define CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx) \
+	((mon_data)[(idx) / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ] + \
+	((idx) % CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ))
+
+/**
+ * Feature is enabled by setting BIT(fence_type), this will trigger the fence
+ * dumps on any error, to explicitly trigger a dump on every fence release
+ * below BIT(fence_type_dump) needs to be used at the same time
+ */
+#define CAM_GENERIC_FENCE_DUMP         0x10
+#define CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP \
+	(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ + (CAM_GENERIC_FENCE_DUMP))
+#define CAM_GENERIC_FENCE_TYPE_DMA_FENCE_DUMP \
+	(CAM_GENERIC_FENCE_TYPE_DMA_FENCE + (CAM_GENERIC_FENCE_DUMP))
+#define CAM_GENERIC_FENCE_TYPE_SYNX_OBJ_DUMP \
+	(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ + (CAM_GENERIC_FENCE_DUMP))
+
 /**
  * enum sync_type - Enum to indicate the type of sync object,
  * i.e. individual or group.
@@ -145,6 +174,105 @@ struct sync_dma_fence_info {
 	bool    sync_created_with_dma;
 };
 
+/**
+ * enum cam_fence_op - Enum to indicate the type of operation performed
+ *
+ * @CAM_FENCE_OP_CREATE                : Created obj
+ * @CAM_FENCE_OP_REGISTER_CB           : Successful callback registration
+ * @CAM_FENCE_OP_SKIP_REGISTER_CB      : Callback registration skipped
+ * @CAM_FENCE_OP_ALREADY_REGISTERED_CB : Callback already registered
+ * @CAM_FENCE_OP_SIGNAL                : Signaled obj
+ * @CAM_FENCE_OP_UNREGISTER_ON_SIGNAL  : Callback unregistered after signaling
+ * @CAM_FENCE_OP_UNREGISTER_CB         : Callback unregistered
+ * @CAM_FENCE_OP_DESTROY               : Destroyed obj
+ */
+enum cam_fence_op {
+	CAM_FENCE_OP_CREATE,
+	CAM_FENCE_OP_REGISTER_CB,
+	CAM_FENCE_OP_SKIP_REGISTER_CB,
+	CAM_FENCE_OP_ALREADY_REGISTERED_CB,
+	CAM_FENCE_OP_SIGNAL,
+	CAM_FENCE_OP_UNREGISTER_ON_SIGNAL,
+	CAM_FENCE_OP_UNREGISTER_CB,
+	CAM_FENCE_OP_DESTROY,
+};
+
+/**
+ * struct cam_generic_fence_monitor_entry - Single operation sync data
+ *
+ * @timestamp     : Timestamp of op
+ * @op            : Operation id
+ */
+struct cam_generic_fence_monitor_entry {
+	struct timespec64 timestamp;
+	enum cam_fence_op op;
+};
+
+/**
+ * struct cam_generic_fence_monitor_data - All operations data from current &
+ *                          previous use of a fence object
+ *
+ * @monitor_head         : Executed operations count
+ * @prev_name            : Previous name of this fence obj
+ * @prev_type            : Previous type of this fence obj
+ * @prev_obj_id          : Previous handle of this fence obj
+ * @prev_sync_id         : Previous handle of this fence's associated sync obj
+ * @prev_remaining       : Previous count of remaining children that not been
+ *                         signaled
+ * @prev_state           : Previous state (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ *                         SIGNALED_ERROR)
+ * @prev_monitor_head    : Previous executed ops count
+ * @swap_monitor_entries : Flag indicating which entry table should be used
+ *                         as current/previous. Used to avoid copying.
+ * @monitor_entries      : Op info entry table
+ * @prev_monitor_entries : Previous op info entry table
+ */
+struct cam_generic_fence_monitor_data {
+	atomic64_t                        monitor_head;
+	char                              prev_name[CAM_DMA_FENCE_NAME_LEN];
+	enum sync_type                    prev_type;
+	int32_t                           prev_obj_id;
+	int32_t                           prev_sync_id;
+	uint32_t                          prev_remaining;
+	uint32_t                          prev_state;
+	uint64_t                          prev_monitor_head;
+	bool                              swap_monitor_entries;
+	struct cam_generic_fence_monitor_entry monitor_entries[
+		CAM_SYNC_MONITOR_MAX_ENTRIES];
+	struct cam_generic_fence_monitor_entry prev_monitor_entries[
+		CAM_SYNC_MONITOR_MAX_ENTRIES];
+};
+
+/**
+ * struct cam_generic_fence_monitor_obj_info - Single object monitor info
+ *
+ * @name                 : Name of this fence obj
+ * @sync_type            : Type of this fence obj
+ * @obj_id               : Handle of this fence obj
+ * @sync_id              : Handle of this fence's associated sync obj
+ * @state                : Previous state (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ *                         SIGNALED_ERROR)
+ * @remaining            : Count of remaining children that not been signaled
+ * @ref_cnt              : Ref count of the number of usage of the fence.
+ * @fence_type           : Fence type - DMA/Sync/Synx
+ * @monitor_data         : Fence operations data
+ * @monitor_entries      : Op info entry table
+ * @prev_monitor_entries : Previous op info entry table
+ */
+struct cam_generic_fence_monitor_obj_info {
+	char *name;
+	enum sync_type sync_type;
+	int32_t obj_id;
+	int32_t sync_id;
+	uint32_t state;
+	uint32_t remaining;
+	uint32_t ref_cnt;
+	uint32_t fence_type;
+	struct cam_generic_fence_monitor_data *monitor_data;
+	struct cam_generic_fence_monitor_entry *monitor_entries;
+	struct cam_generic_fence_monitor_entry *prev_monitor_entries;
+};
+
 /**
  * struct sync_synx_obj_info - Synx object info associated with this sync obj
  *
@@ -225,6 +353,7 @@ struct cam_signalable_info {
  * @work_queue      : Work queue used for dispatching kernel callbacks
  * @cam_sync_eventq : Event queue used to dispatch user payloads to user space
  * @bitmap          : Bitmap representation of all sync objects
+ * @mon_data        : Objects monitor data
  * @params          : Parameters for synx call back registration
  * @version         : version support
  */
@@ -240,6 +369,7 @@ struct sync_device {
 	struct v4l2_fh *cam_sync_eventq;
 	spinlock_t cam_sync_eventq_lock;
 	DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS);
+	struct cam_generic_fence_monitor_data **mon_data;
 #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
 	struct synx_register_params params;
 #endif

+ 204 - 21
drivers/cam_sync/cam_sync_synx.c

@@ -4,6 +4,9 @@
  */
 
 #include "cam_sync_synx.h"
+#include "cam_sync_util.h"
+
+extern unsigned long cam_sync_monitor_mask;
 
 /**
  * struct cam_synx_obj_row - Synx obj row
@@ -27,11 +30,39 @@ struct cam_synx_obj_device {
 	struct synx_session *session_handle;
 	struct mutex dev_lock;
 	DECLARE_BITMAP(bitmap, CAM_SYNX_MAX_OBJS);
+	struct cam_generic_fence_monitor_data **monitor_data;
 };
 
 static struct cam_synx_obj_device *g_cam_synx_obj_dev;
 static char cam_synx_session_name[64] = "Camera_Generic_Synx_Session";
 
+
+static inline struct cam_generic_fence_monitor_entry *
+	__cam_synx_obj_get_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *monitor_data;
+
+	monitor_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_synx_obj_dev->monitor_data, idx);
+	if (monitor_data->swap_monitor_entries)
+		return monitor_data->prev_monitor_entries;
+	else
+		return monitor_data->monitor_entries;
+}
+
+static inline struct cam_generic_fence_monitor_entry *
+	__cam_synx_obj_get_prev_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *monitor_data;
+
+	monitor_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_synx_obj_dev->monitor_data, idx);
+	if (monitor_data->swap_monitor_entries)
+		return monitor_data->monitor_entries;
+	else
+		return monitor_data->prev_monitor_entries;
+}
+
 static int __cam_synx_obj_map_sync_status_util(uint32_t sync_status,
 	uint32_t *out_synx_status)
 {
@@ -51,6 +82,56 @@ static int __cam_synx_obj_map_sync_status_util(uint32_t sync_status,
 	return 0;
 }
 
+static void __cam_synx_obj_save_previous_monitor_data(int32_t row_idx)
+{
+	struct cam_generic_fence_monitor_data *row_mon_data;
+	struct cam_synx_obj_row *row;
+
+	if (!g_cam_synx_obj_dev->monitor_data)
+		return;
+
+	row = &g_cam_synx_obj_dev->rows[row_idx];
+	row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_synx_obj_dev->monitor_data, row_idx);
+
+	/* save current usage details into prev variables */
+	strscpy(row_mon_data->prev_name, row->name, CAM_SYNX_OBJ_NAME_LEN);
+	row_mon_data->prev_obj_id          = row->synx_obj;
+	row_mon_data->prev_sync_id         = row->sync_obj;
+	row_mon_data->prev_state           = row->state;
+	row_mon_data->prev_monitor_head    = atomic64_read(&row_mon_data->monitor_head);
+	row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries;
+}
+
+static void __cam_synx_obj_dump_monitor_array(int32_t row_idx)
+{
+	struct cam_generic_fence_monitor_obj_info obj_info;
+	struct cam_synx_obj_row *row;
+
+	if (!g_cam_synx_obj_dev->monitor_data ||
+		!test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask))
+		return;
+
+	if (!CAM_GENERIC_MONITOR_GET_DATA(g_cam_synx_obj_dev->monitor_data,
+		row_idx)->prev_obj_id)
+		return;
+
+	row = &g_cam_synx_obj_dev->rows[row_idx];
+
+	obj_info.name = row->name;
+	obj_info.obj_id = row->synx_obj;
+	obj_info.state = row->state;
+	obj_info.monitor_data = CAM_GENERIC_MONITOR_GET_DATA(
+		g_cam_synx_obj_dev->monitor_data, row_idx);
+	obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNX_OBJ;
+	obj_info.sync_id = row->sync_obj;
+	obj_info.monitor_entries =
+		__cam_synx_obj_get_monitor_entries(row_idx);
+	obj_info.prev_monitor_entries =
+		__cam_synx_obj_get_prev_monitor_entries(row_idx);
+	cam_generic_fence_dump_monitor_array(&obj_info);
+}
+
 static int __cam_synx_obj_release(int32_t row_idx)
 {
 	struct cam_synx_obj_row *row = NULL;
@@ -62,10 +143,26 @@ static int __cam_synx_obj_release(int32_t row_idx)
 		CAM_WARN(CAM_SYNX,
 			"Unsignaled synx obj being released name: %s synx_obj:%d",
 			row->name, row->synx_obj);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(row_idx,
+				&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+				CAM_FENCE_OP_SIGNAL);
 		synx_signal(g_cam_synx_obj_dev->session_handle, row->synx_obj,
 			SYNX_STATE_SIGNALED_CANCEL);
 	}
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) {
+		/* Update monitor entries & save data before row memset to 0 */
+		cam_generic_fence_update_monitor_array(row_idx,
+			&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+			CAM_FENCE_OP_DESTROY);
+
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ_DUMP, &cam_sync_monitor_mask))
+			__cam_synx_obj_dump_monitor_array(row_idx);
+		__cam_synx_obj_save_previous_monitor_data(row_idx);
+	}
+
 	CAM_DBG(CAM_SYNX,
 		"Releasing synx_obj: %d[%s] row_idx: %u",
 		row->synx_obj, row->name, row_idx);
@@ -106,10 +203,14 @@ static void __cam_synx_obj_init_row(uint32_t idx, const char *name,
 
 	spin_lock_bh(&g_cam_synx_obj_dev->row_spinlocks[idx]);
 	row = &g_cam_synx_obj_dev->rows[idx];
-	memset(row, 0, sizeof(*row));
 	row->synx_obj = synx_obj;
 	row->state = CAM_SYNX_OBJ_STATE_ACTIVE;
 	strscpy(row->name, name, CAM_SYNX_OBJ_NAME_LEN);
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) {
+		cam_generic_fence_update_monitor_array(idx,
+			&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+			CAM_FENCE_OP_CREATE);
+	}
 	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[idx]);
 }
 
@@ -128,6 +229,7 @@ static void __cam_synx_obj_signal_cb(u32 h_synx, int status, void *data)
 {
 	struct cam_synx_obj_signal_sync_obj signal_sync_obj;
 	struct cam_synx_obj_row *synx_obj_row = NULL;
+	int32_t idx;
 
 	if (!data) {
 		CAM_ERR(CAM_SYNX,
@@ -177,6 +279,13 @@ static void __cam_synx_obj_signal_cb(u32 h_synx, int status, void *data)
 		}
 		synx_obj_row->state = CAM_SYNX_OBJ_STATE_SIGNALED;
 		synx_obj_row->sync_cb(synx_obj_row->sync_obj, &signal_sync_obj);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+			&cam_sync_monitor_mask)) {
+			if (!cam_synx_obj_find_obj_in_table(synx_obj_row->synx_obj, &idx))
+				cam_generic_fence_update_monitor_array(idx,
+					&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+					CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
+		}
 	}
 
 }
@@ -202,16 +311,14 @@ int cam_synx_obj_find_obj_in_table(uint32_t synx_obj, int32_t *idx)
 	return rc;
 }
 
-static int __cam_synx_obj_release_obj(uint32_t synx_obj)
+static int __cam_synx_obj_release_obj(uint32_t synx_obj, int32_t *idx)
 {
-	int32_t idx;
-
-	if (cam_synx_obj_find_obj_in_table(synx_obj, &idx)) {
+	if (cam_synx_obj_find_obj_in_table(synx_obj, idx)) {
 		CAM_ERR(CAM_SYNX, "Failed to find synx obj: %d", synx_obj);
 		return -EINVAL;
 	}
 
-	return __cam_synx_obj_release(idx);
+	return __cam_synx_obj_release(*idx);
 }
 
 static int __cam_synx_obj_import(const char *name,
@@ -348,7 +455,7 @@ int cam_synx_obj_import_dma_fence(const char *name, uint32_t flags, void *fence,
 int cam_synx_obj_internal_signal(int32_t row_idx,
 	struct cam_synx_obj_signal *signal_synx_obj)
 {
-	int rc = 0;
+	int rc;
 	uint32_t signal_status, synx_obj = 0;
 	struct cam_synx_obj_row *row = NULL;
 
@@ -381,8 +488,10 @@ int cam_synx_obj_internal_signal(int32_t row_idx,
 			signal_synx_obj->synx_obj);
 	}
 
-	row->state = CAM_SYNX_OBJ_STATE_SIGNALED;
-	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(row_idx,
+			&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+			CAM_FENCE_OP_SIGNAL);
 
 	rc = synx_signal(g_cam_synx_obj_dev->session_handle,
 		signal_synx_obj->synx_obj, signal_status);
@@ -396,20 +505,30 @@ int cam_synx_obj_internal_signal(int32_t row_idx,
 		signal_synx_obj->synx_obj, signal_status, rc);
 
 end:
+	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
 	return rc;
 }
 
 int cam_synx_obj_release(struct cam_synx_obj_release_params *release_params)
 {
-	if (release_params->use_row_idx)
-		return __cam_synx_obj_release_row(release_params->u.synx_row_idx);
-	else
-		return __cam_synx_obj_release_obj(release_params->u.synx_obj);
+	int rc;
+	int32_t idx = -1;
+
+	if (release_params->use_row_idx) {
+		rc = __cam_synx_obj_release_row(release_params->u.synx_row_idx);
+		if (rc < 0)
+			__cam_synx_obj_dump_monitor_array(release_params->u.synx_row_idx);
+	} else {
+		rc = __cam_synx_obj_release_obj(release_params->u.synx_obj, &idx);
+		if ((rc < 0) && (idx >= 0))
+			__cam_synx_obj_dump_monitor_array(idx);
+	}
+	return rc;
 }
 
 int cam_synx_obj_signal_obj(struct cam_synx_obj_signal *signal_synx_obj)
 {
-	int rc = 0;
+	int rc;
 	uint32_t idx, signal_status = 0, synx_obj = 0;
 	struct cam_synx_obj_row *row = NULL;
 
@@ -443,6 +562,11 @@ int cam_synx_obj_signal_obj(struct cam_synx_obj_signal *signal_synx_obj)
 	row->state = CAM_SYNX_OBJ_STATE_SIGNALED;
 	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[idx]);
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(idx,
+			&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+			CAM_FENCE_OP_SIGNAL);
+
 	rc = synx_signal(g_cam_synx_obj_dev->session_handle,
 		signal_synx_obj->synx_obj, signal_status);
 	if (rc) {
@@ -483,22 +607,30 @@ int cam_synx_obj_register_cb(int32_t *sync_obj, int32_t row_idx,
 	synx_obj = row->synx_obj;
 
 	if (row->state != CAM_SYNX_OBJ_STATE_ACTIVE) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(row_idx,
+				&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+				CAM_FENCE_OP_SKIP_REGISTER_CB);
 		CAM_ERR(CAM_SYNX,
 			"synx obj at idx: %d handle: %d is not active, current state: %d",
 			row_idx, row->synx_obj, row->state);
 		rc = -EINVAL;
-		spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
-		goto end;
+		goto monitor_dump;
 	}
 
 	/**
 	 * If the cb is already registered, return
 	 */
 	if (row->cb_registered_for_sync) {
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(row_idx,
+				&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+				CAM_FENCE_OP_ALREADY_REGISTERED_CB);
 		CAM_WARN(CAM_SYNX,
 			"synx obj at idx: %d handle: %d has already registered a cb for sync: %d",
 			row_idx, row->synx_obj, row->sync_obj);
-		spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
 		goto end;
 	}
 
@@ -510,21 +642,30 @@ int cam_synx_obj_register_cb(int32_t *sync_obj, int32_t row_idx,
 	cb_params.cancel_cb_func = NULL;
 	cb_params.h_synx = synx_obj;
 	cb_params.cb_func = __cam_synx_obj_signal_cb;
-	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
+
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask))
+		cam_generic_fence_update_monitor_array(row_idx,
+			&g_cam_synx_obj_dev->dev_lock, g_cam_synx_obj_dev->monitor_data,
+			CAM_FENCE_OP_REGISTER_CB);
 
 	rc = synx_async_wait(g_cam_synx_obj_dev->session_handle, &cb_params);
 	if (rc) {
 		CAM_ERR(CAM_SYNX,
 			"Failed to register cb for synx obj: %d rc: %d",
 			synx_obj, rc);
-		goto end;
+		goto monitor_dump;
 	}
 
 	CAM_DBG(CAM_SYNX,
 		"CB successfully registered for synx obj: %d for sync_obj: %d",
 		synx_obj, *sync_obj);
+	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
+	return rc;
 
+monitor_dump:
+	__cam_synx_obj_dump_monitor_array(row_idx);
 end:
+	spin_unlock_bh(&g_cam_synx_obj_dev->row_spinlocks[row_idx]);
 	return rc;
 }
 
@@ -550,9 +691,25 @@ int __cam_synx_init_session(void)
 	return 0;
 }
 
+void cam_synx_obj_open(void)
+{
+	mutex_lock(&g_cam_synx_obj_dev->dev_lock);
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ, &cam_sync_monitor_mask)) {
+		g_cam_synx_obj_dev->monitor_data = kzalloc(
+			sizeof(struct cam_generic_fence_monitor_data *) *
+			CAM_SYNX_TABLE_SZ, GFP_KERNEL);
+		if (!g_cam_synx_obj_dev->monitor_data) {
+			CAM_WARN(CAM_SYNX, "Failed to allocate memory %zu",
+				sizeof(struct cam_generic_fence_monitor_data *) *
+				CAM_SYNX_TABLE_SZ);
+		}
+	}
+	mutex_unlock(&g_cam_synx_obj_dev->dev_lock);
+}
+
 void cam_synx_obj_close(void)
 {
-	int i, rc = 0;
+	int i, rc;
 	struct cam_synx_obj_row *row = NULL;
 	struct synx_callback_params cb_params;
 
@@ -573,6 +730,12 @@ void cam_synx_obj_close(void)
 			cb_params.h_synx = row->synx_obj;
 			cb_params.cb_func = __cam_synx_obj_signal_cb;
 
+			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+				&cam_sync_monitor_mask))
+				cam_generic_fence_update_monitor_array(i,
+					&g_cam_synx_obj_dev->dev_lock,
+					g_cam_synx_obj_dev->monitor_data,
+					CAM_FENCE_OP_UNREGISTER_CB);
 			rc = synx_cancel_async_wait(
 				g_cam_synx_obj_dev->session_handle,
 				&cb_params);
@@ -584,9 +747,23 @@ void cam_synx_obj_close(void)
 		}
 
 		/* Signal and release the synx obj */
-		if (row->state != CAM_SYNX_OBJ_STATE_SIGNALED)
+		if (row->state != CAM_SYNX_OBJ_STATE_SIGNALED) {
+			if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+				&cam_sync_monitor_mask))
+				cam_generic_fence_update_monitor_array(i,
+					&g_cam_synx_obj_dev->dev_lock,
+					g_cam_synx_obj_dev->monitor_data,
+					CAM_FENCE_OP_SIGNAL);
 			synx_signal(g_cam_synx_obj_dev->session_handle,
 				row->synx_obj, SYNX_STATE_SIGNALED_CANCEL);
+		}
+
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNX_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(i,
+				&g_cam_synx_obj_dev->dev_lock,
+				g_cam_synx_obj_dev->monitor_data,
+				CAM_FENCE_OP_DESTROY);
 		synx_release(g_cam_synx_obj_dev->session_handle,
 			row->synx_obj);
 
@@ -594,6 +771,12 @@ void cam_synx_obj_close(void)
 		clear_bit(i, g_cam_synx_obj_dev->bitmap);
 	}
 
+	if (g_cam_synx_obj_dev->monitor_data) {
+		for (i = 0; i < CAM_SYNX_TABLE_SZ; i++)
+			kfree(g_cam_synx_obj_dev->monitor_data[i]);
+	}
+	kfree(g_cam_synx_obj_dev->monitor_data);
+
 	mutex_unlock(&g_cam_synx_obj_dev->dev_lock);
 	CAM_DBG(CAM_SYNX, "Close on Camera SYNX driver");
 }

+ 8 - 1
drivers/cam_sync/cam_sync_synx.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only
  *
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef __CAM_SYNC_SYNX_H__
 #define __CAM_SYNC_SYNX_H__
@@ -15,6 +15,7 @@
 
 #define CAM_SYNX_MAX_OBJS 256
 #define CAM_SYNX_OBJ_NAME_LEN 64
+#define CAM_SYNX_TABLE_SZ (CAM_SYNX_MAX_OBJS / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)
 
 /* Synx obj state */
 enum cam_synx_obj_state {
@@ -136,6 +137,12 @@ int cam_synx_obj_signal_obj(struct cam_synx_obj_signal *signal_synx_obj);
 int cam_synx_obj_register_cb(int32_t *sync_obj, int32_t row_idx,
 	cam_sync_callback_for_synx_obj sync_cb);
 
+/**
+ * @brief: cam synx driver open
+ *
+ */
+void cam_synx_obj_open(void);
+
 /**
  * @brief: cam synx driver close
  *

+ 319 - 6
drivers/cam_sync/cam_sync_util.c

@@ -1,13 +1,303 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "cam_sync_util.h"
 #include "cam_req_mgr_workq.h"
 #include "cam_common_util.h"
 
+extern unsigned long cam_sync_monitor_mask;
+
+static int cam_generic_expand_monitor_table(int idx, struct mutex *lock,
+	struct cam_generic_fence_monitor_data **mon_data)
+{
+	struct cam_generic_fence_monitor_data *row_mon_data;
+
+	mutex_lock(lock);
+	row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
+	if (!row_mon_data) {
+		row_mon_data = kzalloc(
+			sizeof(struct cam_generic_fence_monitor_data) *
+			CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, GFP_KERNEL);
+		mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)] = row_mon_data;
+	}
+	if (!row_mon_data) {
+		CAM_ERR(CAM_SYNC, "Error allocating memory %zu, idx %d",
+			sizeof(struct cam_generic_fence_monitor_data) *
+			CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ, idx);
+		mutex_unlock(lock);
+		return -ENOMEM;
+	}
+
+	mutex_unlock(lock);
+
+	return 0;
+}
+
+static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *mon_data;
+
+	mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
+	if (mon_data->swap_monitor_entries)
+		return mon_data->prev_monitor_entries;
+	else
+		return mon_data->monitor_entries;
+}
+
+static inline struct cam_generic_fence_monitor_entry *__cam_sync_get_prev_monitor_entries(int idx)
+{
+	struct cam_generic_fence_monitor_data *mon_data;
+
+	mon_data = CAM_SYNC_MONITOR_GET_DATA(idx);
+	if (mon_data->swap_monitor_entries)
+		return mon_data->monitor_entries;
+	else
+		return mon_data->prev_monitor_entries;
+}
+
+const char *cam_fence_op_to_string(
+	enum cam_fence_op op)
+{
+	switch (op) {
+	case CAM_FENCE_OP_CREATE:
+		return "CREATE";
+	case CAM_FENCE_OP_REGISTER_CB:
+		return "REGISTER_CB";
+	case CAM_FENCE_OP_SIGNAL:
+		return "SIGNAL";
+	case CAM_FENCE_OP_UNREGISTER_ON_SIGNAL:
+		return "UNREGISTER_ON_SIGNAL";
+	case CAM_FENCE_OP_UNREGISTER_CB:
+		return "UNREGISTER_CB";
+	case CAM_FENCE_OP_SKIP_REGISTER_CB:
+		return "SKIP_REGISTER_CB";
+	case CAM_FENCE_OP_ALREADY_REGISTERED_CB:
+		return "ALREADY_REGISTERED_CB";
+	case CAM_FENCE_OP_DESTROY:
+		return "DESTROY";
+	default:
+		return "INVALID";
+	}
+}
+
+static void __cam_sync_save_previous_monitor_data(
+	struct sync_table_row *row)
+{
+	struct cam_generic_fence_monitor_data *row_mon_data;
+
+	if (!sync_dev->mon_data)
+		return;
+
+	row_mon_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);
+
+	/* save current usage details into prev variables */
+	strscpy(row_mon_data->prev_name, row->name, SYNC_DEBUG_NAME_LEN);
+	row_mon_data->prev_type         = row->type;
+	row_mon_data->prev_obj_id       = row->sync_id;
+	row_mon_data->prev_state        = row->state;
+	row_mon_data->prev_remaining    = row->remaining;
+	row_mon_data->prev_monitor_head = atomic64_read(&row_mon_data->monitor_head);
+
+	/* Toggle swap flag. Avoid copying and just read/write using correct table idx */
+	row_mon_data->swap_monitor_entries = !row_mon_data->swap_monitor_entries;
+}
+
+void cam_generic_fence_update_monitor_array(int idx,
+	struct mutex *lock,
+	struct cam_generic_fence_monitor_data **mon_data,
+	enum cam_fence_op op)
+{
+	int iterator, rc;
+	struct cam_generic_fence_monitor_data *row_mon_data;
+	struct cam_generic_fence_monitor_entry *row_mon_entries;
+
+	/* Validate inputs */
+	if (!lock || !mon_data)
+		return;
+
+	row_mon_data = mon_data[(idx / CAM_GENERIC_MONITOR_TABLE_ENTRY_SZ)];
+	if (!row_mon_data) {
+		rc = cam_generic_expand_monitor_table(idx, lock, mon_data);
+		if (rc) {
+			CAM_ERR(CAM_SYNC, "Failed to expand monitor table");
+			return;
+		}
+	}
+
+	row_mon_data = CAM_GENERIC_MONITOR_GET_DATA(mon_data, idx);
+	if (op == CAM_FENCE_OP_CREATE)
+		atomic64_set(&row_mon_data->monitor_head, -1);
+	if (row_mon_data->swap_monitor_entries)
+		row_mon_entries = row_mon_data->prev_monitor_entries;
+	else
+		row_mon_entries = row_mon_data->monitor_entries;
+
+	CAM_SYNC_INC_MONITOR_HEAD(&row_mon_data->monitor_head, &iterator);
+	CAM_GET_TIMESTAMP(row_mon_entries[iterator].timestamp);
+	row_mon_entries[iterator].op = op;
+}
+
+static void __cam_generic_fence_dump_monitor_entries(
+	struct cam_generic_fence_monitor_entry *monitor_entries,
+	uint32_t index, uint32_t num_entries)
+{
+	int i = 0;
+	uint64_t ms, hrs, min, sec;
+
+	for (i = 0; i < num_entries; i++) {
+		CAM_CONVERT_TIMESTAMP_FORMAT(monitor_entries[index].timestamp,
+			hrs, min, sec, ms);
+
+		CAM_INFO(CAM_SYNC,
+			"**** %llu:%llu:%llu.%llu : Index[%d] Op[%s]",
+			hrs, min, sec, ms,
+			index,
+			cam_fence_op_to_string(monitor_entries[index].op));
+
+		index = (index + 1) % CAM_SYNC_MONITOR_MAX_ENTRIES;
+	}
+}
+
+static int __cam_generic_fence_get_monitor_entries_info(uint64_t  state_head,
+	uint32_t *oldest_entry, uint32_t *num_entries)
+{
+	*oldest_entry = 0;
+	*num_entries  = 0;
+
+	if (state_head == -1) {
+		return -EINVAL;
+	} else if (state_head < CAM_SYNC_MONITOR_MAX_ENTRIES) {
+		/* head starts from -1 */
+		*num_entries = state_head + 1;
+		*oldest_entry = 0;
+	} else {
+		*num_entries = CAM_SYNC_MONITOR_MAX_ENTRIES;
+		div_u64_rem(state_head + 1,
+			CAM_SYNC_MONITOR_MAX_ENTRIES, oldest_entry);
+	}
+
+	return 0;
+}
+
+void cam_generic_fence_dump_monitor_array(
+	struct cam_generic_fence_monitor_obj_info *obj_info)
+{
+	int rc;
+	uint32_t num_entries, oldest_entry;
+	uint64_t ms, hrs, min, sec;
+	struct timespec64 current_ts;
+	struct cam_generic_fence_monitor_data *mon_data = obj_info->monitor_data;
+
+	/* Check if there are any current entries in the monitor data */
+	rc = __cam_generic_fence_get_monitor_entries_info(
+		atomic64_read(&mon_data->monitor_head),
+		&oldest_entry, &num_entries);
+
+	if (rc)
+		return;
+
+	/* Print current monitor entries */
+	CAM_GET_TIMESTAMP(current_ts);
+	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
+	switch (obj_info->fence_type) {
+	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
+		CAM_INFO(CAM_SYNC,
+			"======== %llu:%llu:%llu:%llu Dumping monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d ref_cnt %d num_entries %u ===========",
+			hrs, min, sec, ms, obj_info->name, obj_info->sync_type,
+			obj_info->obj_id, obj_info->state, obj_info->remaining,
+			obj_info->ref_cnt, num_entries);
+		break;
+	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
+		CAM_INFO(CAM_DMA_FENCE,
+			"======== %llu:%llu:%llu:%llu Dumping monitor information for dma obj %s, fd %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
+			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
+			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
+			num_entries);
+		break;
+	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
+		CAM_INFO(CAM_SYNX,
+			"======== %llu:%llu:%llu:%llu Dumping monitor information for synx obj %s, synx_id %d sync_id %d state %d ref_cnt %d num_entries %u ===========",
+			hrs, min, sec, ms, obj_info->name, obj_info->obj_id,
+			obj_info->sync_id, obj_info->state, obj_info->ref_cnt,
+			num_entries);
+		break;
+	default:
+		break;
+	}
+
+	__cam_generic_fence_dump_monitor_entries(obj_info->monitor_entries,
+		oldest_entry, num_entries);
+
+
+	/* Check if there are any previous entries in the monitor data */
+	rc = __cam_generic_fence_get_monitor_entries_info(
+		mon_data->prev_monitor_head,
+		&oldest_entry, &num_entries);
+
+	if (rc)
+		return;
+
+	/* Print previous monitor entries */
+	CAM_GET_TIMESTAMP(current_ts);
+	CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
+	switch (obj_info->fence_type) {
+	case CAM_GENERIC_FENCE_TYPE_SYNC_OBJ:
+		CAM_INFO(CAM_SYNC,
+			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for sync obj %s, type %d, sync_id %d state %d remaining %d num_entries %u ===========",
+			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_type,
+			mon_data->prev_obj_id, mon_data->prev_state, mon_data->prev_remaining,
+			num_entries);
+		break;
+	case CAM_GENERIC_FENCE_TYPE_DMA_FENCE:
+		CAM_INFO(CAM_DMA_FENCE,
+			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for dma obj %s, fd %d sync_id %d state %d num_entries %u ===========",
+			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
+			mon_data->prev_sync_id, mon_data->prev_state,
+			num_entries);
+		break;
+	case CAM_GENERIC_FENCE_TYPE_SYNX_OBJ:
+		CAM_INFO(CAM_SYNX,
+			"======== %llu:%llu:%llu:%llu Dumping previous monitor information for synx obj %s, synx_id %d sync_id %d state %d num_entries %u ===========",
+			hrs, min, sec, ms, mon_data->prev_name, mon_data->prev_obj_id,
+			mon_data->prev_sync_id, mon_data->prev_state,
+			num_entries);
+		break;
+	default:
+		break;
+	}
+
+	__cam_generic_fence_dump_monitor_entries(obj_info->prev_monitor_entries,
+		oldest_entry, num_entries);
+
+}
+
+void cam_sync_dump_monitor_array(struct sync_table_row *row)
+{
+	struct cam_generic_fence_monitor_obj_info obj_info;
+
+	if (!sync_dev->mon_data ||
+		!test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask) ||
+		!(CAM_GENERIC_MONITOR_GET_DATA(sync_dev->mon_data, row->sync_id)->prev_obj_id))
+		return;
+
+	obj_info.name = row->name;
+	obj_info.sync_type = row->type;
+	obj_info.obj_id = row->sync_id;
+	obj_info.state = row->state;
+	obj_info.remaining = row->remaining;
+	obj_info.ref_cnt = atomic_read(&row->ref_cnt);
+	obj_info.monitor_data = CAM_SYNC_MONITOR_GET_DATA(row->sync_id);
+	obj_info.fence_type = CAM_GENERIC_FENCE_TYPE_SYNC_OBJ;
+	obj_info.monitor_entries =
+		__cam_sync_get_monitor_entries(row->sync_id);
+	obj_info.prev_monitor_entries =
+		__cam_sync_get_prev_monitor_entries(row->sync_id);
+	cam_generic_fence_dump_monitor_array(&obj_info);
+}
+
 int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
 	long *idx)
 {
@@ -35,8 +325,6 @@ int cam_sync_init_row(struct sync_table_row *table,
 	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
 		return -EINVAL;
 
-	memset(row, 0, sizeof(*row));
-
 	strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
 	INIT_LIST_HEAD(&row->parents_list);
 	INIT_LIST_HEAD(&row->children_list);
@@ -48,6 +336,11 @@ int cam_sync_init_row(struct sync_table_row *table,
 	init_completion(&row->signaled);
 	INIT_LIST_HEAD(&row->callback_list);
 	INIT_LIST_HEAD(&row->user_payload_list);
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
+		cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
+			sync_dev->mon_data,
+			CAM_FENCE_OP_CREATE);
+	}
 	CAM_DBG(CAM_SYNC,
 		"row name:%s sync_id:%i [idx:%u] row_state:%u ",
 		row->name, row->sync_id, idx, row->state);
@@ -60,7 +353,7 @@ int cam_sync_init_group_object(struct sync_table_row *table,
 	uint32_t *sync_objs,
 	uint32_t num_objs)
 {
-	int i, rc = 0;
+	int i, rc;
 	struct sync_child_info *child_info;
 	struct sync_parent_info *parent_info;
 	struct sync_table_row *row = table + idx;
@@ -160,7 +453,7 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
 	struct sync_table_row      *child_row = NULL, *parent_row = NULL;
 	struct list_head            temp_child_list, temp_parent_list;
 
-	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+	if (!table || (idx <= 0) || (idx >= CAM_SYNC_MAX_OBJS))
 		return -EINVAL;
 
 	CAM_DBG(CAM_SYNC,
@@ -182,6 +475,15 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
 			"Destroying an active sync object name:%s id:%i",
 			row->name, row->sync_id);
 
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ, &cam_sync_monitor_mask)) {
+		cam_generic_fence_update_monitor_array(idx, &sync_dev->table_lock,
+			sync_dev->mon_data,
+			CAM_FENCE_OP_DESTROY);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ_DUMP, &cam_sync_monitor_mask))
+			cam_sync_dump_monitor_array(row);
+		__cam_sync_save_previous_monitor_data(row);
+	}
+
 	row->state = CAM_SYNC_STATE_INVALID;
 
 	/* Object's child and parent objects will be added into this list */
@@ -359,6 +661,11 @@ void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
 		temp_sync_cb, &signalable_row->callback_list, list) {
 		sync_cb->status = status;
 		list_del_init(&sync_cb->list);
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(sync_obj,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
 		queue_work(sync_dev->work_queue,
 			&sync_cb->cb_dispatch_work);
 	}
@@ -382,12 +689,18 @@ void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
 			event_cause);
 
 		list_del_init(&payload_info->list);
+
+		if (test_bit(CAM_GENERIC_FENCE_TYPE_SYNC_OBJ,
+			&cam_sync_monitor_mask))
+			cam_generic_fence_update_monitor_array(sync_obj,
+				&sync_dev->table_lock, sync_dev->mon_data,
+				CAM_FENCE_OP_UNREGISTER_ON_SIGNAL);
 		/*
 		 * We can free the list node here because
 		 * sending V4L event will make a deep copy
 		 * anyway
 		 */
-		 kfree(payload_info);
+		kfree(payload_info);
 	}
 
 	/*

+ 32 - 1
drivers/cam_sync/cam_sync_util.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __CAM_SYNC_UTIL_H__
@@ -183,4 +183,35 @@ void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
 void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
 	uint32_t list_clean_type, uint32_t sync_obj);
 
+/**
+ * @brief: Function to dump sync obj & monitor data
+ * @row                 : Row whose data to dump
+ *
+ * @return None
+ */
+void cam_sync_dump_monitor_array(struct sync_table_row *row);
+
+/**
+ * @brief: Function to add a new entry to the monitor table
+ * @idx                 : Index of row to update
+ * @mutex               : Mutex lock when expand monitor table
+ * @mon_data            : Pointer to the monitor data array
+ * @op                  : Operation id
+ *
+ * @return None
+ */
+void cam_generic_fence_update_monitor_array(int idx,
+	struct mutex *lock,
+	struct cam_generic_fence_monitor_data **mon_data,
+	enum cam_fence_op                op);
+
+/**
+ * @brief: Function to dump monitor array for sync/dma/synx
+ * @obj_info             : Monitor object that needs to be dumped
+ *
+ * @return None
+ */
+void cam_generic_fence_dump_monitor_array(
+	struct cam_generic_fence_monitor_obj_info *obj_info);
+
 #endif /* __CAM_SYNC_UTIL_H__ */