
msm: camera: cdm: CDM page fault handling improvement

Modify the CDM page fault handling sequence to pause the CDM, set the
global CDM PF status bit, and stream off all clients. This also
reduces PF handling latency by omitting the CDM HW reset during CDM
stream off. While a page fault is being handled, the faulted CDM HW
does not service any client API (except release) until all clients
have released the CDM.

CRs-Fixed: 3156647
Change-Id: I3532bf68ba59800b2084dee252d7fd0807e0c68a
Signed-off-by: sokchetra eung <[email protected]>
sokchetra eung · 3 years ago · commit 4ae959b7b8
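
The reworked fault-handling order described in the commit message can be read as one sequence. The sketch below condenses the changes to cam_hw_cdm_iommu_fault_handler further down in this diff; it is illustrative only, the wrapper name is hypothetical, and the BL FIFO locking around the register dump is omitted.

/*
 * Condensed sketch (not part of this change) of the reworked page
 * fault path in cam_hw_cdm_iommu_fault_handler; FIFO locking and
 * error handling are dropped for brevity.
 */
static void cdm_pf_sequence_sketch(struct cam_hw_info *cdm_hw,
	struct cam_smmu_pf_info *pf_info)
{
	struct cam_cdm *core = cdm_hw->core_info;

	/*
	 * 1. Mark the core faulted: stream on/off and every command
	 *    except RELEASE now fail with -EAGAIN.
	 */
	set_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status);

	mutex_lock(&cdm_hw->hw_mutex);

	/* 2. Pause the CDM so it issues no further memory transactions */
	cam_hw_cdm_pause_core(cdm_hw, true);

	/* 3. Dump debug registers while the HW is still powered up */
	if (cdm_hw->hw_state == CAM_HW_STATE_POWER_UP)
		cam_hw_cdm_dump_core_debug_registers(cdm_hw, true);

	/* 4. Let clients handle the fault and drop their references */
	cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
		(void *)pf_info);

	/* 5. Stream off every client without a full HW reset */
	cam_cdm_pf_stream_off_all_clients(cdm_hw);

	mutex_unlock(&cdm_hw->hw_mutex);
}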

+ 3 - 0
drivers/cam_cdm/cam_cdm.h

@@ -86,6 +86,7 @@
 #define CAM_CDM_ERROR_HW_STATUS 0x5
 #define CAM_CDM_FLUSH_HW_STATUS 0x6
 #define CAM_CDM_RESET_ERR_STATUS 0x7
+#define CAM_CDM_PF_HW_STATUS 0x8
 
 /* Curent used AHB masks and shifts */
 #define CAM_CDM_AHB_LOG_CID_SHIFT    28
@@ -541,6 +542,7 @@ struct cam_cdm_bl_fifo {
  * @gen_irq:             memory region in which gen_irq command will be written
  * @cpas_handle:         handle for cpas driver
  * @arbitration:         type of arbitration to be used for the CDM
+ * @num_active_clients:  Number of currently active clients
  */
 struct cam_cdm {
 	uint32_t index;
@@ -563,6 +565,7 @@ struct cam_cdm {
 	struct cam_cdm_hw_mem gen_irq[CAM_CDM_BL_FIFO_MAX];
 	uint32_t cpas_handle;
 	enum cam_cdm_arbitration arbitration;
+	uint8_t num_active_clients;
 };
 
 /* struct cam_cdm_private_dt_data - CDM hw custom dt data */
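
For context, these status defines are bit numbers on the cdm_status word, which this change drives through the kernel set_bit/test_bit helpers, so CAM_CDM_PF_HW_STATUS marks bit 8. A minimal sketch, assuming cdm_status is an unsigned long (as its use with test_bit implies) and that this sits alongside cam_cdm.h; the helper names are hypothetical.

#include <linux/bitops.h>

/* Hypothetical helpers mirroring how the PF status bit is used below */
static inline void cdm_mark_page_fault(struct cam_cdm *core)
{
	set_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status);
}

static inline bool cdm_is_page_faulted(struct cam_cdm *core)
{
	return test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status);
}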

+ 87 - 1
drivers/cam_cdm/cam_cdm_core_common.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -343,6 +344,20 @@ int cam_cdm_stream_ops_internal(void *hw_priv,
 		return -EINVAL;
 
 	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	/*
+	 * If this CDM HW encounters a page fault, block any further
+	 * stream on/off until this CDM gets released and acquired
+	 * again. The CDM page fault handler will stream off the device.
+	 */
+	if (test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) {
+		CAM_WARN(CAM_CDM,
+			"Attempt to stream %s failed. %s%u has encountered a page fault",
+			operation ? "on" : "off",
+			core->name, core->id);
+		return -EAGAIN;
+	}
+
 	mutex_lock(&cdm_hw->hw_mutex);
 	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
 	client = core->clients[client_idx];
@@ -443,6 +458,49 @@ end:
 	return rc;
 }
 
+int cam_cdm_pf_stream_off_all_clients(struct cam_hw_info *cdm_hw)
+{
+	struct cam_cdm *core;
+	struct cam_cdm_client *client;
+	int i, rc;
+
+	if (!cdm_hw)
+		return -EINVAL;
+
+	core = cdm_hw->core_info;
+
+	if (!cdm_hw->open_count) {
+		CAM_DBG(CAM_CDM, "%s%u already streamed off. Open count %d",
+			core->name, core->id, cdm_hw->open_count);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_CDM, "streaming off %s%u internally",
+		core->name, core->id);
+
+	rc = cam_hw_cdm_pf_deinit(cdm_hw, NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_CDM, "Deinit failed in stream off rc: %d", rc);
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		client = core->clients[i];
+		if (!client)
+			continue;
+
+		mutex_lock(&client->lock);
+		client->stream_on = false;
+		mutex_unlock(&client->lock);
+	}
+
+	rc = cam_cpas_stop(core->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_CDM, "CPAS stop failed in stream off rc %d", rc);
+
+	cdm_hw->open_count = 0;
+
+	return rc;
+}
+
 int cam_cdm_stream_start(void *hw_priv,
 	void *start_args, uint32_t size)
 {
@@ -483,6 +541,21 @@ int cam_cdm_process_cmd(void *hw_priv,
 
 	soc_data = &cdm_hw->soc_info;
 	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	/*
+	 * When the CDM has encountered a page fault, no command other
+	 * than release will be serviced. The PF handler notifies all
+	 * clients of the error; clients are expected to handle it and
+	 * release their references to the CDM core.
+	 */
+	if (test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status) &&
+		(cmd != CAM_CDM_HW_INTF_CMD_RELEASE)) {
+		CAM_ERR(CAM_CDM,
+			"%s%u has encountered a page fault, unable to service cmd %u",
+			core->name, core->id, cmd);
+		return -EAGAIN;
+	}
+
 	switch (cmd) {
 	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
 		struct cam_cdm_hw_intf_cmd_submit_bl *req;
@@ -581,8 +654,9 @@ int cam_cdm_process_cmd(void *hw_priv,
 			rc = -ENOMEM;
 			break;
 		}
-
+		core->num_active_clients++;
 		mutex_unlock(&cdm_hw->hw_mutex);
+
 		client = core->clients[idx];
 		mutex_init(&client->lock);
 		data->ops = core->ops;
@@ -598,6 +672,7 @@ int cam_cdm_process_cmd(void *hw_priv,
 				mutex_lock(&cdm_hw->hw_mutex);
 				kfree(core->clients[idx]);
 				core->clients[idx] = NULL;
+				core->num_active_clients--;
 				mutex_unlock(
 					&cdm_hw->hw_mutex);
 				rc = -EPERM;
@@ -658,6 +733,17 @@ int cam_cdm_process_cmd(void *hw_priv,
 		mutex_unlock(&client->lock);
 		mutex_destroy(&client->lock);
 		kfree(client);
+		if (core->num_active_clients)
+			core->num_active_clients--;
+		else
+			CAM_ERR(CAM_CDM,
+				"Invalid active client decrement %u for %s%u",
+				core->num_active_clients, core->name, core->id);
+		if (!core->num_active_clients) {
+			CAM_DBG(CAM_CDM, "Clear cdm status bits for %s%u",
+				core->name, core->id);
+			core->cdm_status = 0;
+		}
 		mutex_unlock(&cdm_hw->hw_mutex);
 		rc = 0;
 		break;
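
Taken together, the guard in cam_cdm_process_cmd and the release-path bookkeeping above define what a client is expected to do after a fault: stop retrying, release its handle, and re-acquire later. A hedged sketch of that flow follows; the caller name is hypothetical and the submit/release argument shapes follow the corresponding cases of cam_cdm_process_cmd, which are only partially visible in this hunk.

/*
 * Hypothetical client-side reaction to a CDM page fault. Once the PF
 * bit is set, every command except CAM_CDM_HW_INTF_CMD_RELEASE returns
 * -EAGAIN; when the last active client releases, the release path
 * clears cdm_status so the core can be acquired again.
 */
static void client_handle_cdm_pf(struct cam_hw_info *cdm_hw,
	struct cam_cdm_hw_intf_cmd_submit_bl *submit, uint32_t *handle)
{
	int rc;

	rc = cam_cdm_process_cmd(cdm_hw, CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
		submit, sizeof(*submit));
	if (rc == -EAGAIN)
		/* Faulted core: drop the reference instead of retrying */
		cam_cdm_process_cmd(cdm_hw, CAM_CDM_HW_INTF_CMD_RELEASE,
			handle, sizeof(*handle));
}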

+ 4 - 0
drivers/cam_cdm/cam_cdm_core_common.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_CDM_CORE_COMMON_H_
@@ -27,11 +28,14 @@ extern struct cam_cdm_utils_ops CDM170_ops;
 
 int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
 int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_pf_deinit(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size);
 int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
 int cam_hw_cdm_release_genirq_mem(void *hw_priv);
 int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
 int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
 	bool operation);
+int cam_cdm_pf_stream_off_all_clients(struct cam_hw_info *cdm_hw);
 int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
 int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
 int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,

+ 86 - 18
drivers/cam_cdm/cam_cdm_hw_core.c

@@ -973,8 +973,12 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 	mutex_lock(&core->bl_fifo[fifo_idx].fifo_lock);
 	mutex_lock(&client->lock);
 
+	/*
+	 * Check the PF status bit to avoid submitting commands to the CDM
+	 */
 	if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
-			test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status)) {
+			test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status) ||
+			test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) {
 		mutex_unlock(&client->lock);
 		mutex_unlock(&core->bl_fifo[fifo_idx].fifo_lock);
 		return -EAGAIN;
@@ -992,11 +996,16 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 			rc = -EINVAL;
 			break;
 		}
+
+		/*
+		 * If an error/reset/PF occurs while command submission is
+		 * ongoing, prevent further command submission.
+		 */
 		if (test_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status) ||
-				test_bit(CAM_CDM_RESET_HW_STATUS,
-					&core->cdm_status)) {
+				test_bit(CAM_CDM_RESET_HW_STATUS, &core->cdm_status) ||
+				test_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status)) {
 			CAM_ERR_RATE_LIMIT(CAM_CDM,
-				"In error/reset state cnt=%d total cnt=%d cdm_status 0x%x",
+				"In error/reset/PF state cnt=%d total cnt=%d cdm_status 0x%x",
 				i, req->data->cmd_arrary_count,
 				core->cdm_status);
 			rc = -EAGAIN;
@@ -1384,7 +1393,7 @@ static void cam_hw_cdm_iommu_fault_handler(struct cam_smmu_pf_info *pf_info)
 	struct cam_hw_info *cdm_hw = NULL;
 	struct cam_cdm *core = NULL;
 	struct cam_cdm_private_dt_data *pvt_data;
-	int i;
+	int i, rc;
 
 	if (!pf_info) {
 		CAM_ERR(CAM_CDM, "pf_info is null");
@@ -1407,22 +1416,35 @@ static void cam_hw_cdm_iommu_fault_handler(struct cam_smmu_pf_info *pf_info)
 		}
 
 handle_cdm_pf:
-		set_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status);
+
+		CAM_ERR(CAM_CDM, "Page Fault on %s%u, flags: %u, status: %llu",
+			core->name, core->id, core->flags, core->cdm_status);
+		set_bit(CAM_CDM_PF_HW_STATUS, &core->cdm_status);
 		mutex_lock(&cdm_hw->hw_mutex);
+		/* Pause the CDM HW so it makes no further memory transactions */
+		cam_hw_cdm_pause_core(cdm_hw, true);
+
 		for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++)
 			mutex_lock(&core->bl_fifo[i].fifo_lock);
+
 		if (cdm_hw->hw_state == CAM_HW_STATE_POWER_UP) {
 			cam_hw_cdm_dump_core_debug_registers(cdm_hw, true);
 		} else
 			CAM_INFO(CAM_CDM, "%s%u hw is power in off state",
 				cdm_hw->soc_info.label_name,
 				cdm_hw->soc_info.index);
+
 		for (i = 0; i < core->offsets->reg_data->num_bl_fifo; i++)
 			mutex_unlock(&core->bl_fifo[i].fifo_lock);
-		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
-			(void *)pf_info->iova);
+
+		/* Notify clients to handle PF event */
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT, (void *)pf_info);
+		/* Stream off CDM completely */
+		rc = cam_cdm_pf_stream_off_all_clients(cdm_hw);
+		if (rc)
+			CAM_ERR(CAM_CDM, "Stream off failed for %s%u rc: %d",
+				core->name, core->id, rc);
 		mutex_unlock(&cdm_hw->hw_mutex);
-		clear_bit(CAM_CDM_ERROR_HW_STATUS, &core->cdm_status);
 	} else {
 		CAM_ERR(CAM_CDM, "Invalid token");
 	}
@@ -2074,13 +2096,66 @@ end:
 	return rc;
 }
 
+static inline void cam_hw_cdm_clear_bl_requests(struct cam_cdm *cdm_core)
+{
+	struct cam_cdm_bl_cb_request_entry *node, *tnode;
+	int i;
+
+	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
+		list_for_each_entry_safe(node, tnode,
+			&cdm_core->bl_fifo[i].bl_request_list, entry) {
+			list_del_init(&node->entry);
+			kfree(node);
+			node = NULL;
+		}
+	}
+}
+
+int cam_hw_cdm_pf_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int i, rc;
+	unsigned long flags = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
+		mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
+
+	/* clear bl request */
+	cam_hw_cdm_clear_bl_requests(cdm_core);
+
+	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++)
+		mutex_unlock(&cdm_core->bl_fifo[i].fifo_lock);
+
+	flags = cam_hw_util_hw_lock_irqsave(cdm_hw);
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cam_hw_util_hw_unlock_irqrestore(cdm_hw, flags);
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_CDM, "disable platform failed for %s%u",
+			soc_info->label_name, soc_info->index);
+	else
+		CAM_DBG(CAM_CDM, "%s%u Deinit success",
+			soc_info->label_name, soc_info->index);
+
+	return rc;
+}
+
 int cam_hw_cdm_deinit(void *hw_priv,
 	void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_hw_info *cdm_hw = hw_priv;
 	struct cam_hw_soc_info *soc_info = NULL;
 	struct cam_cdm *cdm_core = NULL;
-	struct cam_cdm_bl_cb_request_entry *node, *tnode;
 	int rc = 0, i;
 	uint32_t reset_val = 1;
 	long time_left;
@@ -2096,14 +2171,7 @@ int cam_hw_cdm_deinit(void *hw_priv,
 		mutex_lock(&cdm_core->bl_fifo[i].fifo_lock);
 
 	/*clear bl request */
-	for (i = 0; i < cdm_core->offsets->reg_data->num_bl_fifo; i++) {
-		list_for_each_entry_safe(node, tnode,
-			&cdm_core->bl_fifo[i].bl_request_list, entry) {
-			list_del_init(&node->entry);
-			kfree(node);
-			node = NULL;
-		}
-	}
+	cam_hw_cdm_clear_bl_requests(cdm_core);
 
 	set_bit(CAM_CDM_RESET_HW_STATUS, &cdm_core->cdm_status);
 	reinit_completion(&cdm_core->reset_complete);
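
The latency claim in the commit message comes down to this split: cam_hw_cdm_pf_deinit only clears pending BL requests, marks the HW powered down, and disables platform resources, whereas cam_hw_cdm_deinit additionally arms the HW reset via CAM_CDM_RESET_HW_STATUS and reset_complete (the remainder of that function lies outside this hunk). A minimal sketch with a hypothetical selector:

/*
 * Hypothetical helper (not part of this change) contrasting the two
 * teardown flavors: the page-fault path deliberately skips the HW
 * reset handshake, which is the latency saving described in the
 * commit message.
 */
static int cdm_teardown_sketch(struct cam_hw_info *cdm_hw, bool in_pf)
{
	if (in_pf)
		/* Clear BL requests, power down, disable resources only */
		return cam_hw_cdm_pf_deinit(cdm_hw, NULL, 0);

	/* Full teardown, including the HW reset sequence */
	return cam_hw_cdm_deinit(cdm_hw, NULL, 0);
}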