Преглед изворни кода

msm: eva: Minidump enablement for eva

Enabled FW static dump.
Enabled VA_MD for CMD and MSG queues, both for CPU and DSP.
Enabled VA_MD for debug structs.

Change-Id: I9a5a2418620cd0608b90301eefe0726a462c1ce3
Signed-off-by: Aniruddh Sharma <[email protected]>
Aniruddh Sharma пре 3 година
родитељ
комит
b30be7e544

+ 1 - 0
msm/Kbuild

@@ -23,6 +23,7 @@ msm-eva-objs := eva/cvp.o \
         eva/msm_smem.o \
         eva/msm_cvp_debug.o \
         eva/msm_cvp_res_parse.o \
+        eva/cvp_dump.o \
         eva/cvp_hfi.o \
         eva/hfi_response_handler.o \
         eva/hfi_packetization.o \

+ 1 - 0
msm/Makefile

@@ -11,6 +11,7 @@ msm-eva-objs := eva/cvp.o \
                 eva/msm_smem.o \
                 eva/msm_cvp_debug.o \
                 eva/msm_cvp_res_parse.o \
+                eva/cvp_dump.o \
                 eva/cvp_hfi.o \
                 eva/hfi_response_handler.o \
                 eva/hfi_packetization.o \

+ 330 - 0
msm/eva/cvp_dump.c

@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pm_qos.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+#include <soc/qcom/minidump.h>
+#include "hfi_packetization.h"
+#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi_io.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp_clocks.h"
+#include "cvp_dump.h"
+
+/*
+ * Declare and initialize the head node of the linked list
+ * used for the HFI queue va_md dump.
+ */
+LIST_HEAD(head_node_hfi_queue);
+
+/*
+ * Declare and initialize the head node of the linked list
+ * used for the debug-struct va_md dump.
+ */
+LIST_HEAD(head_node_dbg_struct);
+
+/*
+ * md_eva_dump - register one static minidump region for an EVA buffer.
+ * @name: region name the dump will be collected under
+ * @virt: kernel virtual address of the buffer
+ * @phys: physical address of the buffer
+ * @size: size of the buffer in bytes
+ *
+ * Returns 0 on success, 1 when minidump is disabled or registration fails.
+ */
+int md_eva_dump(const char *name, u64 virt, u64 phys, u64 size)
+{
+	struct md_region md_entry;
+
+	if (!msm_minidump_enabled()) {
+		dprintk(CVP_ERR, "Minidump is NOT enabled!\n");
+		return 1;
+	}
+
+	dprintk(CVP_INFO, "Minidump is enabled!\n");
+
+	/* Add minidump region for the EVA-FW image */
+	strlcpy(md_entry.name, name, sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)virt;
+	md_entry.phys_addr = phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry) < 0) {
+		dprintk(CVP_ERR, "Failed to add \"%s\" data in Minidump\n",
+			name);
+		return 1;
+	}
+
+	/* 64-bit values need %llx/%llu specifiers */
+	dprintk(CVP_INFO,
+		"add region success for \"%s\" with virt addr: 0x%llx, phy addr: 0x%llx, size: %llu",
+		md_entry.name, (u64)md_entry.virt_addr,
+		(u64)md_entry.phys_addr, (u64)md_entry.size);
+	return 0;
+}
+
+/*
+ * cvp_va_md_register - register one subsystem with the VA minidump driver.
+ * @name: subsystem name to register under
+ * @notf_blk_ptr: pointer to the notifier_block whose notifier_call the
+ *		minidump driver invokes on crash
+ */
+void cvp_va_md_register(char *name, void *notf_blk_ptr)
+{
+	struct notifier_block *blk = (struct notifier_block *)notf_blk_ptr;
+	int ret;
+
+	ret = qcom_va_md_register(name, blk);
+	if (ret)
+		dprintk(CVP_ERR,
+			"\"%s\" : qcom_va_md_register failed rc = %d\n",
+			name, ret);
+	else
+		dprintk(CVP_INFO,
+			"\"%s\" : eva_queue qcom_va_md_register success rc = %d\n",
+			name, ret);
+}
+
+/*
+ * cvp_register_va_md_region - register all EVA VA-minidump notifiers.
+ * Logs an error and does nothing when VA minidump support is disabled.
+ */
+void cvp_register_va_md_region(void)
+{
+	/* Definition now matches the (void) prototype in cvp_dump.h */
+	if (!qcom_va_md_enabled()) {
+		dprintk(CVP_ERR, "VA_Minidump is NOT enabled!\n");
+		return;
+	}
+
+	cvp_va_md_register("eva_queues", &eva_hfiq_list_notif_blk);
+	cvp_va_md_register("dbg_struct", &eva_struct_list_notif_blk);
+}
+
+/*
+ * cvp_free_va_md_list - release every node on both va_md lists.
+ * Must be kept in sync with any new va_md list that gets added.
+ */
+void cvp_free_va_md_list(void)
+{
+	struct eva_va_md_queue *node, *next;
+
+	/* Unlink and free each node of the HFI queue va_md list */
+	list_for_each_entry_safe(node, next, &head_node_hfi_queue, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	/* Unlink and free each node of the debug-struct va_md list */
+	list_for_each_entry_safe(node, next, &head_node_dbg_struct, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+}
+
+/*
+ * add_va_node_to_list - append one dump-region descriptor to a va_md list.
+ * @list_head_node: head of the list to append to
+ * @buff_va: virtual address of the buffer to be dumped
+ * @buff_size: size of that buffer in bytes
+ * @region_name: name the dump is collected under
+ * @copy: whether the handler should snapshot the buffer into an
+ *		intermediate kzalloc'd copy before registering it
+ */
+void add_va_node_to_list(void *list_head_node, void *buff_va, u32 buff_size,
+                        const char *region_name, bool copy)
+{
+	struct list_head *head = (struct list_head *)list_head_node;
+	struct eva_va_md_queue *node;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		dprintk(CVP_ERR, "Memory allocation failed for list node\n");
+		return;
+	}
+
+	INIT_LIST_HEAD(&node->list);
+
+	/* Record the buffer description in the new node */
+	node->va_md_buff = buff_va;
+	node->va_md_buff_size = buff_size;
+	strlcpy(node->region_name, region_name, sizeof(node->region_name));
+	node->copy = copy;
+
+	/* Append to the caller-supplied list */
+	list_add_tail(&node->list, head);
+	dprintk(CVP_INFO,
+			"\"%s\" added to buffer list, vaddr: %px size: 0x%x\n",
+			node->region_name, node->va_md_buff,
+			node->va_md_buff_size);
+}
+
+/*
+ * add_hfi_queue_to_va_md_list - queue the four HFI interface queues
+ * (CPU and DSP, CMD and MSG) on the global hfi va_md list.
+ * @device: struct iris_hfi_device pointer passed as void *
+ */
+void add_hfi_queue_to_va_md_list(void *device)
+{
+	struct iris_hfi_device *hdev = (struct iris_hfi_device *)device;
+	struct cvp_iface_q_info *q;
+
+	/* CPU-facing command and message queues */
+	q = &hdev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	add_va_node_to_list(&head_node_hfi_queue,
+			q->q_array.align_virtual_addr, q->q_array.mem_size,
+			"eva_cmdq_cpu", false);
+	q = &hdev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	add_va_node_to_list(&head_node_hfi_queue,
+			q->q_array.align_virtual_addr, q->q_array.mem_size,
+			"eva_msgq_cpu", false);
+
+	/* DSP-facing command and message queues */
+	q = &hdev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	add_va_node_to_list(&head_node_hfi_queue,
+			q->q_array.align_virtual_addr, q->q_array.mem_size,
+			"eva_cmdq_dsp", false);
+	q = &hdev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	add_va_node_to_list(&head_node_hfi_queue,
+			q->q_array.align_virtual_addr, q->q_array.mem_size,
+			"eva_msgq_dsp", false);
+}
+
+/*
+ * add_queue_header_to_va_md_list - queue the four HFI queue headers
+ * (CPU and DSP, CMD and MSG) on the global debug-struct va_md list.
+ * @device: struct iris_hfi_device pointer passed as void *
+ */
+void add_queue_header_to_va_md_list(void *device)
+{
+	struct iris_hfi_device *hdev = (struct iris_hfi_device *)device;
+	struct cvp_iface_q_info *q;
+	struct cvp_hfi_queue_header *hdr;
+
+	/* cvp_hfi_queue_header: CPU command queue */
+	q = &hdev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	hdr = (struct cvp_hfi_queue_header *)q->q_hdr;
+	add_va_node_to_list(&head_node_dbg_struct,
+			hdr, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-cpucmdQ", false);
+
+	/* cvp_hfi_queue_header: CPU message queue */
+	q = &hdev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	hdr = (struct cvp_hfi_queue_header *)q->q_hdr;
+	add_va_node_to_list(&head_node_dbg_struct,
+			hdr, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-cpumsgQ", false);
+
+	/* cvp_hfi_queue_header: DSP command queue */
+	q = &hdev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+	hdr = (struct cvp_hfi_queue_header *)q->q_hdr;
+	add_va_node_to_list(&head_node_dbg_struct,
+			hdr, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-dspcmdQ", false);
+
+	/* cvp_hfi_queue_header: DSP message queue */
+	q = &hdev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+	hdr = (struct cvp_hfi_queue_header *)q->q_hdr;
+	add_va_node_to_list(&head_node_dbg_struct,
+			hdr, sizeof(struct cvp_hfi_queue_header),
+			"cvp_hfi_queue_header-dspmsgQ", false);
+}
+
+/*
+ * eva_hfiq_list_notif_handler - VA minidump crash notifier for HFI queues.
+ * Walks head_node_hfi_queue and registers each recorded buffer as a
+ * va_md region; buffers flagged 'copy' are snapshotted first.
+ * Always returns NOTIFY_OK (per-region failures are only logged).
+ */
+int eva_hfiq_list_notif_handler(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+	struct va_md_entry entry;
+	struct eva_va_md_queue *cursor, *temp;
+	int rc = 0;
+	void *temp_data;
+
+	list_for_each_entry_safe(cursor, temp, &head_node_hfi_queue, list) {
+		if (cursor->copy) {
+			/* Snapshot the buffer into an intermediate copy */
+			dprintk(CVP_INFO,
+				"Copying \"%s\"(%d Bytes) to intermediate buffer\n",
+				cursor->region_name,
+				cursor->va_md_buff_size);
+			temp_data = kzalloc(cursor->va_md_buff_size,
+					GFP_KERNEL);
+			/* Skip the region on OOM instead of dereferencing NULL */
+			if (!temp_data) {
+				dprintk(CVP_ERR,
+					"Allocation failed for \"%s\" intermediate buffer\n",
+					cursor->region_name);
+				continue;
+			}
+			memcpy(temp_data, cursor->va_md_buff,
+				cursor->va_md_buff_size);
+			entry.vaddr = (unsigned long)temp_data;
+		} else {
+			entry.vaddr = (unsigned long)cursor->va_md_buff;
+		}
+		entry.size = cursor->va_md_buff_size;
+		strlcpy(entry.owner, cursor->region_name, sizeof(entry.owner));
+		entry.cb = NULL;
+
+		if (msm_cvp_minidump_enable) {
+			rc = qcom_va_md_add_region(&entry);
+			if (rc)
+				dprintk(CVP_ERR,
+					"Add region \"failed\" for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+			else
+				dprintk(CVP_INFO,
+					"Add region \"success\" for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+/*
+ * eva_struct_list_notif_handler - VA minidump crash notifier for debug
+ * structures. Walks head_node_dbg_struct and registers each recorded
+ * buffer as a va_md region; buffers flagged 'copy' are snapshotted first.
+ * Always returns NOTIFY_OK (per-region failures are only logged).
+ */
+int eva_struct_list_notif_handler(struct notifier_block *this,
+                unsigned long event, void *ptr)
+{
+	struct va_md_entry entry;
+	struct eva_va_md_queue *cursor, *temp;
+	int rc = 0;
+	void *temp_data;
+
+	list_for_each_entry_safe(cursor, temp, &head_node_dbg_struct, list) {
+		if (cursor->copy) {
+			/* Snapshot the buffer into an intermediate copy */
+			dprintk(CVP_INFO,
+				"Copying \"%s\"(%d Bytes) to intermediate buffer\n",
+				cursor->region_name,
+				cursor->va_md_buff_size);
+			temp_data = kzalloc(cursor->va_md_buff_size,
+					GFP_KERNEL);
+			/* Skip the region on OOM instead of dereferencing NULL */
+			if (!temp_data) {
+				dprintk(CVP_ERR,
+					"Allocation failed for \"%s\" intermediate buffer\n",
+					cursor->region_name);
+				continue;
+			}
+			memcpy(temp_data, cursor->va_md_buff,
+				cursor->va_md_buff_size);
+			entry.vaddr = (unsigned long)temp_data;
+		} else {
+			entry.vaddr = (unsigned long)cursor->va_md_buff;
+		}
+		entry.size = cursor->va_md_buff_size;
+		strlcpy(entry.owner, cursor->region_name, sizeof(entry.owner));
+		entry.cb = NULL;
+
+		if (msm_cvp_minidump_enable) {
+			rc = qcom_va_md_add_region(&entry);
+			if (rc)
+				dprintk(CVP_ERR,
+					"Add region \"failed\" for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+			else
+				dprintk(CVP_INFO,
+					"Add region \"success\" for \"%s\", vaddr: %px size: 0x%x\n",
+					entry.owner, cursor->va_md_buff,
+					entry.size);
+		}
+	}
+	return NOTIFY_OK;
+}
+
+/* Crash notifier for the debug-struct list; runs after the queue notifier
+ * (priority INT_MAX-1). Registered via cvp_register_va_md_region(). */
+struct notifier_block eva_struct_list_notif_blk = {
+		.notifier_call = eva_struct_list_notif_handler,
+		.priority = INT_MAX-1,
+};
+
+/* Crash notifier for the HFI queue list; highest priority (INT_MAX).
+ * Registered via cvp_register_va_md_region(). */
+struct notifier_block eva_hfiq_list_notif_blk = {
+		.notifier_call = eva_hfiq_list_notif_handler,
+		.priority = INT_MAX,
+};

+ 97 - 0
msm/eva/cvp_dump.h

@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+/* Identifiers with leading double underscore are reserved for the
+ * implementation; use a plain guard name instead. */
+#ifndef H_CVP_MINIDUMP_H
+#define H_CVP_MINIDUMP_H
+
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <soc/qcom/minidump.h>
+
+#define MAX_REGION_NAME_LEN 32
+/* Parenthesize the expression so the macro expands safely inside
+ * arithmetic contexts (e.g. x / EVAFW_IMAGE_SIZE). */
+#define EVAFW_IMAGE_SIZE (7*1024*1024)
+
+extern struct list_head head_node_hfi_queue;
+extern struct list_head head_node_dbg_struct;
+extern struct notifier_block eva_hfiq_list_notif_blk;
+extern struct notifier_block eva_struct_list_notif_blk;
+
+/* notifier handler function for list of eva hfi queues */
+int eva_hfiq_list_notif_handler(struct notifier_block *this,
+                                        unsigned long event, void *ptr);
+
+/* notifier handler function for list of eva global structures */
+int eva_struct_list_notif_handler(struct notifier_block *this,
+                                        unsigned long event, void *ptr);
+
+/*
+ * wrapper for static minidump
+
+ * @name: Dump will be collected with this name
+ * @virt: Virtual address of the buffer which needs to be dumped
+ * @phys: Physical address of the buffer which needs to be dumped
+ * @size: Size of the buffer which needs to be dumped
+*/
+int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size);
+
+/*
+ * Function to add a dump region to the queue
+
+ * @list_head_node: Head node of the list which needs to be updated
+ * @buff_va: Virtual address of the buffer which needs to be dumped
+ * @buff_size: Size of the buffer which needs to be dumped
+ * @region_name: Dump will be collected with this name
+ * @copy: Flag to indicate if the buffer data needs to be copied
+ *		to the intermediate buffer allocated by kzalloc.
+*/
+void add_va_node_to_list(void *list_head_node, void *buff_va,
+                        u32 buff_size, const char *region_name, bool copy);
+
+/*
+ * Registers subsystem to minidump driver
+
+ * @name: Subsystem name which will get registered
+ * @notf_blk_ptr: notifier block pointer.
+ *		notifier_call mentioned in this block will be triggered by
+ *		minidump driver in case of crash
+*/
+void cvp_va_md_register(char *name, void* notf_blk_ptr);
+
+/* One function where we will register all the regions */
+void cvp_register_va_md_region(void);
+
+/*
+ * Free up the memory allocated for different va_md_list
+ * Do not forget to add code for any new list in this function
+*/
+void cvp_free_va_md_list(void);
+
+/* Adds the HFI queues(both for CPU and DSP) to the global hfi list head*/
+void add_hfi_queue_to_va_md_list(void *device);
+
+/*Add queue header structures(both for CPU and DSP)
+to the global struct list head*/
+void add_queue_header_to_va_md_list(void *device);
+
+/*
+ * Node structure for VA_MD Linked List
+
+ * @list: linux kernel list implementation
+ * @va_md_buff: Virtual address of the buffer which needs to be dumped
+ * @va_md_buff_size: Size of the buffer which needs to be dumped
+ * @region_name: Dump will be collected with this name
+ * @copy: Flag to indicate if the buffer data needs to be copied
+ *		to the intermediate buffer allocated by kzalloc.
+*/
+struct eva_va_md_queue
+{
+	struct list_head list;			/* linkage into a va_md list */
+	void *va_md_buff;			/* virtual address of buffer to dump */
+	u32 va_md_buff_size;			/* size of that buffer in bytes */
+	char region_name[MAX_REGION_NAME_LEN];	/* name dump is collected under */
+	bool copy;				/* snapshot into kzalloc'd copy first? */
+};
+
+#endif

+ 6 - 0
msm/eva/cvp_fw_load.c

@@ -14,6 +14,7 @@
 #include <linux/of_address.h>
 #include <linux/firmware.h>
 #include <linux/soc/qcom/mdt_loader.h>
+#include "cvp_dump.h"
 
 #define MAX_FIRMWARE_NAME_SIZE 128
 
@@ -104,6 +105,11 @@ static int __load_fw_to_memory(struct platform_device *pdev,
 				__func__, rc, firmware_name);
 		goto exit;
 	}
+	rc = md_eva_dump("evafwdata", (uintptr_t)virt, phys, EVAFW_IMAGE_SIZE);
+	if (rc) {
+		dprintk(CVP_ERR, "%s: error %d in dumping \"%s\"\n",
+				__func__, rc, firmware_name);
+	}
 
 	memunmap(virt);
 	release_firmware(firmware);

+ 9 - 0
msm/eva/cvp_hfi.c

@@ -30,6 +30,7 @@
 #include "cvp_hfi_io.h"
 #include "msm_cvp_dsp.h"
 #include "msm_cvp_clocks.h"
+#include "cvp_dump.h"
 
 #define FIRMWARE_SIZE			0X00A00000
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
@@ -1798,6 +1799,13 @@ static int iris_hfi_core_init(void *device)
 		rc = -ENOMEM;
 		goto err_core_init;
 	}
+	cvp_register_va_md_region();
+
+	// Add node for dev struct
+	add_va_node_to_list(&head_node_dbg_struct, dev,
+        sizeof(struct iris_hfi_device), "iris_hfi_device-dev", false);
+	add_queue_header_to_va_md_list((void*)dev);
+	add_hfi_queue_to_va_md_list((void*)dev);
 
 	rc = msm_cvp_map_ipcc_regs(&ipcc_iova);
 	if (!rc) {
@@ -1941,6 +1949,7 @@ static int iris_hfi_core_trigger_ssr(void *device,
 	int rc = 0;
 	struct iris_hfi_device *dev;
 
+	cvp_free_va_md_list();
 	if (!device) {
 		dprintk(CVP_ERR, "invalid device\n");
 		return -ENODEV;

+ 2 - 0
msm/eva/msm_cvp_debug.c

@@ -30,6 +30,7 @@ bool msm_cvp_syscache_disable = !true;
 bool msm_cvp_dsp_disable = !true;
 bool msm_cvp_mmrm_enabled = true;
 bool msm_cvp_dcvs_disable = !true;
+int msm_cvp_minidump_enable = !1;
 
 #define MAX_DBG_BUF_SIZE 4096
 
@@ -247,6 +248,7 @@ struct dentry *msm_cvp_debugfs_init_drv(void)
 	debugfs_create_u32("fw_low_power_mode", 0644, dir,
 		&msm_cvp_fw_low_power_mode);
 	debugfs_create_u32("debug_output", 0644, dir, &msm_cvp_debug_out);
+	debugfs_create_u32("minidump_enable", 0644, dir, &msm_cvp_minidump_enable);
 	f = debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
 	if (IS_ERR_OR_NULL(f))
 		goto failed_create_dir;

+ 1 - 0
msm/eva/msm_cvp_debug.h

@@ -65,6 +65,7 @@ extern bool msm_cvp_syscache_disable;
 extern bool msm_cvp_dsp_disable;
 extern bool msm_cvp_mmrm_enabled;
 extern bool msm_cvp_dcvs_disable;
+extern int msm_cvp_minidump_enable;
 
 #define dprintk(__level, __fmt, arg...)	\
 	do { \

+ 5 - 0
msm/eva/msm_cvp_dsp.c

@@ -10,6 +10,7 @@
 #include "msm_cvp_core.h"
 #include "msm_cvp.h"
 #include "cvp_hfi.h"
+#include "cvp_dump.h"
 
 struct cvp_dsp_apps gfa_cv;
 static int hlosVM[HLOS_VM_NUM] = {VMID_HLOS};
@@ -1915,6 +1916,10 @@ int cvp_dsp_device_init(void)
 	int i;
 	char name[CVP_FASTRPC_DRIVER_NAME_SIZE] = "qcom,fastcv0\0";
 
+    add_va_node_to_list(&head_node_dbg_struct,
+        &gfa_cv, sizeof(struct cvp_dsp_apps),
+        "cvp_dsp_apps-gfa_cv", false);
+
 	mutex_init(&me->tx_lock);
 	mutex_init(&me->rx_lock);
 	me->state = DSP_INVALID;

+ 0 - 1
msm/eva/msm_cvp_internal.h

@@ -26,7 +26,6 @@
 #include <synx_api.h>
 
 #define MAX_SUPPORTED_INSTANCES 16
-#define MAX_NAME_LENGTH 64
 #define MAX_DEBUGFS_NAME 50
 #define MAX_DSP_INIT_ATTEMPTS 16
 #define FENCE_WAIT_SIGNAL_TIMEOUT 100