Jelajahi Sumber

msm: camera: reqmgr: Dump device name as part of frame skip log

Currently we can infer only the pd of the device whose request
was not available on time leading to a frame skip. This change
identifies and logs the device name to ease debugging.

Change-Id: I49a3cd84611b21626c68395b11f0ef52bffb62db
Signed-off-by: Karthik Anantha Ram <[email protected]>
Signed-off-by: Mukund Madhusudan Atre <[email protected]>
Karthik Anantha Ram 6 tahun lalu
induk
melakukan
f2a0cc111f

+ 53 - 7
drivers/cam_req_mgr/cam_req_mgr_core.c

@@ -167,6 +167,40 @@ static int __cam_req_mgr_inject_delay(
 	return rc;
 }
 
+/**
+ * __cam_req_mgr_find_dev_name()
+ *
+ * @brief      : Log the name of each device that does not have the
+ *               request ready, to identify the cause of a frame skip
+ * @link       : link info
+ * @req_id     : req_id which is not ready
+ * @pd         : pipeline delay of the devices to inspect
+ * @masked_val : bitmask with a bit SET for each device at this pd
+ *               that already has @req_id ready (dev_mask &
+ *               req_ready_map); devices whose bit is CLEAR are the
+ *               ones holding up the request and are logged here
+ *
+ */
+static void __cam_req_mgr_find_dev_name(
+	struct cam_req_mgr_core_link *link,
+	int64_t req_id, uint32_t pd, uint32_t masked_val)
+{
+	int i = 0;
+	struct cam_req_mgr_connected_device *dev = NULL;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		/* Only devices operating at this pipeline delay matter */
+		if (dev->dev_info.p_delay == pd) {
+			/* Bit set => device already reported req_id ready */
+			if (masked_val & (1 << dev->dev_bit))
+				continue;
+
+			CAM_INFO(CAM_CRM,
+				"Skip Frame: req: %lld not ready on link: 0x%x for pd: %d dev: %s open_req count: %d",
+				req_id, link->link_hdl, pd, dev->dev_info.name,
+				link->open_req_cnt);
+		}
+	}
+}
+
 /**
  * __cam_req_mgr_notify_error_on_link()
  *
@@ -296,14 +330,15 @@ static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
 		}
 	} else {
 		/* This pd table is not ready to proceed with asked idx */
-		CAM_INFO(CAM_CRM,
-			"Skip Frame: req: %lld not ready pd: %d open_req count: %d",
-			CRM_GET_REQ_ID(traverse_data->in_q, curr_idx),
-			tbl->pd,
-			traverse_data->open_req_cnt);
+		traverse_data->result_data.req_id =
+			CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
+		traverse_data->result_data.pd = tbl->pd;
+		traverse_data->result_data.masked_value =
+			(tbl->dev_mask & slot->req_ready_map);
 		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
 		return -EAGAIN;
 	}
+
 	return 0;
 }
 
@@ -650,6 +685,9 @@ static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
 	traverse_data.tbl = link->req.l_tbl;
 	traverse_data.in_q = in_q;
 	traverse_data.result = 0;
+	traverse_data.result_data.masked_value = 0;
+	traverse_data.result_data.pd = 0;
+	traverse_data.result_data.req_id = 0;
 	traverse_data.validate_only = validate_only;
 	traverse_data.open_req_cnt = link->open_req_cnt;
 
@@ -669,8 +707,13 @@ static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
 			apply_data[2].req_id,
 			apply_data[1].req_id,
 			apply_data[0].req_id);
-	} else
+	} else {
 		rc = -EAGAIN;
+		__cam_req_mgr_find_dev_name(link,
+			traverse_data.result_data.req_id,
+			traverse_data.result_data.pd,
+			traverse_data.result_data.masked_value);
+	}
 
 	return rc;
 }
@@ -2104,6 +2147,7 @@ int cam_req_mgr_process_error(void *priv, void *data)
 			spin_lock_bh(&link->link_state_spin_lock);
 			link->state = CAM_CRM_LINK_STATE_ERR;
 			spin_unlock_bh(&link->link_state_spin_lock);
+			link->open_req_cnt++;
 		}
 	}
 	mutex_unlock(&link->req.lock);
@@ -2580,7 +2624,9 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
 		dev->dev_bit = pd_tbl->dev_count++;
 		dev->pd_tbl = pd_tbl;
 		pd_tbl->dev_mask |= (1 << dev->dev_bit);
-
+		CAM_DBG(CAM_CRM, "dev_bit %u name %s pd %u mask %d",
+			dev->dev_bit, dev->dev_info.name, pd_tbl->pd,
+			pd_tbl->dev_mask);
 		/* Communicate with dev to establish the link */
 		dev->ops->link_setup(&link_data);
 

+ 22 - 7
drivers/cam_req_mgr/cam_req_mgr_core.h

@@ -125,10 +125,24 @@ enum cam_req_mgr_link_state {
 	CAM_CRM_LINK_STATE_MAX,
 };
 
+/**
+ * struct cam_req_mgr_traverse_result
+ * Snapshot of a failed traverse, used to identify and log the
+ * device(s) that caused a frame skip.
+ * @req_id        : Req id that is not ready
+ * @pd            : pipeline delay of the table that failed
+ * @masked_value  : bitmask with a bit set for each device that DOES
+ *                  have @req_id ready (dev_mask & req_ready_map);
+ *                  clear bits identify the devices that are not ready
+ */
+struct cam_req_mgr_traverse_result {
+	int64_t  req_id;
+	uint32_t pd;
+	uint32_t masked_value;
+};
+
 /**
  * struct cam_req_mgr_traverse
  * @idx              : slot index
  * @result           : contains which all tables were able to apply successfully
+ * @result_data      : holds the result of traverse in case it fails
  * @tbl              : pointer of pipeline delay based request table
  * @apply_data       : pointer which various tables will update during traverse
  * @in_q             : input request queue pointer
@@ -136,13 +150,14 @@ enum cam_req_mgr_link_state {
  * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
  */
 struct cam_req_mgr_traverse {
-	int32_t                       idx;
-	uint32_t                      result;
-	struct cam_req_mgr_req_tbl   *tbl;
-	struct cam_req_mgr_apply     *apply_data;
-	struct cam_req_mgr_req_queue *in_q;
-	bool                          validate_only;
-	int32_t                       open_req_cnt;
+	int32_t                            idx;
+	uint32_t                           result;
+	struct cam_req_mgr_traverse_result result_data;
+	struct cam_req_mgr_req_tbl        *tbl;
+	struct cam_req_mgr_apply          *apply_data;
+	struct cam_req_mgr_req_queue      *in_q;
+	bool                               validate_only;
+	int32_t                            open_req_cnt;
 };
 
 /**