
msm-mmrm: add para virtualization FrontEnd driver

Add the para virtualization FrontEnd (FE) driver. The FE runs in the
guest VM, exposes the mmrm client API (register, set value, set value
in range, get value, deregister), and forwards each request to the
BackEnd over a Gunyah message queue, matching responses to pending
requests by sequence number.

Change-Id: I3747e427aee3ebf3d18ac622fe3e9730ed34d217
Mark Bao, 3 years ago
commit b4df6ac473

+ 7 - 0
Kbuild

@@ -12,4 +12,11 @@ LINUXINCLUDE += -I$(MMRM_ROOT)/vm/common/inc/
 obj-m += vm/be/
 endif
 
+else
+
+LINUXINCLUDE += -I$(MMRM_ROOT)/vm/common/inc/
+
+obj-m += vm/fe/
+obj-y += vm/fe/vm_test/
+
 endif

+ 15 - 0
vm/fe/Kbuild

@@ -0,0 +1,15 @@
+
+ifeq ($(CONFIG_MSM_MMRM_VM), y)
+obj-m += mmrm_vm_fe.o
+
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+ifeq ($(CONFIG_ARCH_QTI_VM), y)
+	mmrm_vm_fe-objs := \
+	src/mmrm_vm_fe_main.o \
+	src/mmrm_vm_fe_frontend.o \
+	src/mmrm_vm_fe_msgq.o \
+	src/mmrm_vm_fe_api.o \
+	../common/src/mmrm_vm_debug.o
+endif
+endif
+endif

+ 16 - 0
vm/fe/src/Makefile.am

@@ -0,0 +1,16 @@
+KBUILD_OPTIONS+= MMRM_ROOT=$(KERNEL_SRC)/$(M)
+
+all: modules
+
+modules:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 76 - 0
vm/fe/src/mmrm_vm_fe.h

@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MMRM_VM_FE_H__
+#define __MMRM_VM_FE_H__
+
+#include <mmrm_vm_msgq.h>
+#include <mmrm_vm_interface.h>
+
+struct mmrm_vm_fe_clk_src_info {
+	struct mmrm_clk_client_desc client_desc;
+	int flags;
+};
+
+
+struct mmrm_vm_fe_clk_src_set {
+	struct mmrm_vm_fe_clk_src_info *clk_src_tbl;
+	u32 count;
+};
+
+struct mmrm_vm_fe_priv {
+	struct device *dev;
+
+	struct mmrm_client *client_tbl;
+
+	struct list_head mmrm_work_list;
+	struct mutex work_list_lock;
+
+	struct mmrm_vm_fe_clk_src_set clk_src_set;
+	struct mutex msg_send_lock;
+	int  seq_no;
+};
+
+/*
+ * mmrm_vm_fe_recv_cb -- called by the FE message receiving thread to
+ *                       hand a received packet to the FE
+ * @mmrm_vm: specific device driver info
+ * @data: message pointer
+ * @size: message size
+ */
+void mmrm_vm_fe_recv_cb(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size);
+
+/*
+ * mmrm_vm_fe_request_send -- send an mmrm request message from the FE to the BE
+ * @mmrm_vm: device data, includes message handle
+ * @msg_pkt: request message pointer
+ * @msg_size: message size
+ */
+int mmrm_vm_fe_request_send(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_request_msg_pkt *msg_pkt, size_t msg_size);
+
+/*
+ * mmrm_vm_fe_get_client -- get the client handle for a client ID
+ * @client_id: client ID
+ */
+struct mmrm_client *mmrm_vm_fe_get_client(u32 client_id);
+
+/*
+ * mmrm_vm_fe_load_clk_rsrc -- load clk resource info from DT
+ * @drv_priv: device data
+ */
+int mmrm_vm_fe_load_clk_rsrc(struct mmrm_vm_driver_data *drv_priv);
+
+/*
+ * mmrm_vm_fe_recv -- process received response info
+ * @mmrm_vm: device data
+ * @data: received response info buffer
+ * @size: message size
+ */
+void mmrm_vm_fe_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size);
+
+#endif /* __MMRM_VM_FE_H__ */
+
+

+ 191 - 0
vm/fe/src/mmrm_vm_fe_api.c

@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/gunyah/gh_rm_drv.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_msgq.h"
+
+#define get_client_handle_2_id(client) (client->client_uid)
+
+extern struct mmrm_vm_driver_data *drv_vm_fe;
+
+#define MAX_TIMEOUT_MS 300
+
+int mmrm_fe_append_work_list(struct mmrm_vm_msg_q *msg_q, int msg_sz)
+{
+	struct mmrm_vm_request_msg_pkt *msg_pkt = msg_q->m_req;
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+	unsigned long remaining_jiffies;
+
+	init_completion(&msg_q->complete);
+	mutex_lock(&fe_data->work_list_lock);
+	list_add_tail(&msg_q->link, &fe_data->mmrm_work_list);
+	mutex_unlock(&fe_data->work_list_lock);
+
+	mutex_lock(&fe_data->msg_send_lock);
+	msg_pkt->msg.hd.seq_no = fe_data->seq_no++;
+	mutex_unlock(&fe_data->msg_send_lock);
+
+	mmrm_vm_fe_request_send(drv_vm_fe, msg_pkt, msg_sz);
+
+	/* wait_for_completion_timeout() returns 0 on timeout, remaining jiffies otherwise */
+	remaining_jiffies = wait_for_completion_timeout(&msg_q->complete,
+		msecs_to_jiffies(MAX_TIMEOUT_MS));
+	if (remaining_jiffies == 0)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+struct mmrm_client *mmrm_client_register(struct mmrm_client_desc *desc)
+{
+	struct mmrm_vm_request_msg_pkt msg;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+	struct mmrm_vm_api_request_msg *api_msg = &msg.msg;
+	struct mmrm_vm_register_request *reg_data = &api_msg->data.reg;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+	int rc = 0;
+
+	struct mmrm_vm_msg_q msg_q;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_REGISTER;
+	reg_data->client_type = desc->client_type;
+	reg_data->priority = desc->priority;
+	memcpy(&reg_data->desc, &desc->client_info.desc, sizeof(reg_data->desc));
+
+	msg_q.m_req = &msg;
+	msg_q.m_resp = &resp_pkt;
+	rc = mmrm_fe_append_work_list(&msg_q, msg_size);
+	if (rc != 0)
+		return NULL;
+
+	return mmrm_vm_fe_get_client(resp_pkt.msg.data.reg.client_id);
+}
+EXPORT_SYMBOL(mmrm_client_register);
+
+int mmrm_client_deregister(struct mmrm_client *client)
+{
+	int rc = 0;
+	struct mmrm_vm_request_msg_pkt msg;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+	struct mmrm_vm_api_request_msg *api_msg = &msg.msg;
+	struct mmrm_vm_deregister_request *reg_data = &api_msg->data.dereg;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	struct mmrm_vm_msg_q msg_q;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_DEREGISTER;
+	reg_data->client_id = get_client_handle_2_id(client);
+
+	msg_q.m_req = &msg;
+	msg_q.m_resp = &resp_pkt;
+
+	rc = mmrm_fe_append_work_list(&msg_q, msg_size);
+	if (rc != 0)
+		return rc;
+
+	rc = resp_pkt.msg.data.dereg.ret_code;
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_deregister);
+
+int mmrm_client_set_value(struct mmrm_client *client,
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	int rc = 0;
+	struct mmrm_vm_request_msg_pkt msg;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+	struct mmrm_vm_api_request_msg *api_msg = &msg.msg;
+	struct mmrm_vm_setvalue_request *reg_data = &api_msg->data.setval;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	struct mmrm_vm_msg_q msg_q;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE;
+	reg_data->client_id = get_client_handle_2_id(client);
+	reg_data->data.flags = client_data->flags;
+	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
+	reg_data->val = val;
+
+	msg_q.m_req = &msg;
+	msg_q.m_resp = &resp_pkt;
+
+	rc = mmrm_fe_append_work_list(&msg_q, msg_size);
+	if (rc != 0)
+		return rc;
+
+	rc = resp_pkt.msg.data.setval.val;
+
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value);
+
+int mmrm_client_set_value_in_range(struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+	struct mmrm_vm_request_msg_pkt msg;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+	struct mmrm_vm_api_request_msg *api_msg = &msg.msg;
+	struct mmrm_vm_setvalue_inrange_request *reg_data = &api_msg->data.setval_range;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	struct mmrm_vm_msg_q msg_q;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE_INRANGE;
+	reg_data->client_id = get_client_handle_2_id(client);
+	reg_data->data.flags = client_data->flags;
+	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
+	reg_data->val.cur = val->cur;
+	reg_data->val.max = val->max;
+	reg_data->val.min = val->min;
+
+	msg_q.m_req = &msg;
+	msg_q.m_resp = &resp_pkt;
+
+	rc = mmrm_fe_append_work_list(&msg_q, msg_size);
+	if (rc != 0)
+		return rc;
+
+	rc = resp_pkt.msg.data.setval_range.ret_code;
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value_in_range);
+
+
+int mmrm_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+	struct mmrm_vm_request_msg_pkt msg;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+	struct mmrm_vm_api_request_msg *api_msg = &msg.msg;
+	struct mmrm_vm_getvalue_request *reg_data = &api_msg->data.getval;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	struct mmrm_vm_msg_q msg_q;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_GETVALUE;
+	reg_data->client_id = get_client_handle_2_id(client);
+
+	msg_q.m_req = &msg;
+	msg_q.m_resp = &resp_pkt;
+
+	rc = mmrm_fe_append_work_list(&msg_q, msg_size);
+
+	if (rc == 0) {
+		val->cur = resp_pkt.msg.data.getval.val.cur;
+		val->max = resp_pkt.msg.data.getval.val.max;
+		val->min = resp_pkt.msg.data.getval.val.min;
+	}
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_get_value);
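
For context, below is a minimal usage sketch of the client API exported by this file, as a guest-side consumer might call it. It is illustrative only: the example function, the 300 MHz value, and the single hw block are assumptions, and the existing mmrm client header providing the mmrm_client_* prototypes and types is assumed to be available to the caller (its path is not part of this change). Only the entry points and struct fields exercised here appear in the code above.

/*
 * Illustrative only: exercise the FE-exported client API end to end.
 * Assumes the existing mmrm client header (types and prototypes) is
 * included by the caller; desc is filled the same way a native mmrm
 * client would fill it.
 */
static int example_mmrm_vote(struct mmrm_client_desc *desc)
{
	struct mmrm_client_data data = { .num_hw_blocks = 1, .flags = 0 };
	struct mmrm_client_res_value res;
	struct mmrm_client *client;
	int rc;

	/* forwarded to the BE over the Gunyah msgq; blocks until a response or timeout */
	client = mmrm_client_register(desc);
	if (!client)
		return -EINVAL;

	/* placeholder clock rate; the return convention follows whatever the BE reports */
	rc = mmrm_client_set_value(client, &data, 300000000UL);
	if (rc < 0)
		goto out;

	/* on success res.cur/res.min/res.max are filled from the BE response */
	rc = mmrm_client_get_value(client, &res);
out:
	mmrm_client_deregister(client);
	return rc;
}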

+ 188 - 0
vm/fe/src/mmrm_vm_fe_frontend.c

@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of.h>
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_debug.h"
+
+extern struct mmrm_vm_driver_data *drv_vm_fe;
+
+void mmrm_vm_fe_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size)
+{
+	struct mmrm_vm_api_response_msg *msg = data;
+	struct mmrm_vm_msg_q *node, *temp;
+	int rc = -1;
+	u64 kt2;
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+
+	mutex_lock(&fe_data->work_list_lock);
+	list_for_each_entry_safe(node, temp, &fe_data->mmrm_work_list, link) {
+		if (msg->hd.seq_no == node->m_req->msg.hd.seq_no) {
+			d_mpr_e("%s: seq no:%d\n", __func__, msg->hd.seq_no);
+			list_del(&node->link);
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&fe_data->work_list_lock);
+	if (rc != 0)
+		return;
+
+	d_mpr_e("%s: cmd:%d\n", __func__, msg->hd.cmd_id);
+	switch (msg->hd.cmd_id) {
+	case	MMRM_VM_RESPONSE_REGISTER:
+		node->m_resp->msg.data.reg.client_id = msg->data.reg.client_id;
+		d_mpr_h("%s: client:%d\n", __func__, msg->data.reg.client_id);
+		break;
+	case	MMRM_VM_RESPONSE_SETVALUE:
+		node->m_resp->msg.data.setval.val = msg->data.setval.val;
+		break;
+	case	MMRM_VM_RESPONSE_SETVALUE_INRANGE:
+		node->m_resp->msg.data.setval_range.ret_code = msg->data.setval_range.ret_code;
+		break;
+	case	MMRM_VM_RESPONSE_GETVALUE:
+		node->m_resp->msg.data.getval.val = msg->data.getval.val;
+		break;
+	case	MMRM_VM_RESPONSE_DEREGISTER:
+		node->m_resp->msg.data.dereg.ret_code = msg->data.dereg.ret_code;
+		break;
+	default:
+		d_mpr_e("wrong response\n");
+		break;
+	}
+
+	complete(&node->complete);
+	kt2 = ktime_get_ns();
+}
+
+int mmrm_vm_fe_request_send(struct mmrm_vm_driver_data *mmrm_vm,
+		struct mmrm_vm_request_msg_pkt *msg_pkt, size_t msg_size)
+{
+	int  rc;
+
+	struct mmrm_vm_msg_hdr *hdr;
+	struct mmrm_vm_gh_msgq_info *pmsg_info = &mmrm_vm->msg_info;
+
+	hdr = (struct mmrm_vm_msg_hdr *)&msg_pkt->hdr;
+	hdr->version = MMRM_VM_VER_1;
+	hdr->type = MMRM_VM_TYPE_DATA;
+	hdr->flags = 0;
+	hdr->size = msg_size;
+
+	if (!pmsg_info->msgq_handle) {
+		d_mpr_e("Failed to send msg, invalid msgq handle\n");
+		return -EINVAL;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		d_mpr_e("msg size unsupported for msgq: %zu > %d\n", msg_size,
+			GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		return -E2BIG;
+	}
+
+	d_mpr_h("%s: handle=%p\n", __func__, pmsg_info->msgq_handle);
+	rc = gh_msgq_send(pmsg_info->msgq_handle, msg_pkt,
+			msg_size + sizeof(msg_pkt->hdr), GH_MSGQ_TX_PUSH);
+
+	d_mpr_h("%s: handle=%p result:%d\n", __func__, pmsg_info->msgq_handle, rc);
+
+	return rc;
+}
+
+int mmrm_vm_fe_load_clk_rsrc(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int rc = 0, num_clk_src = 0;
+	int c = 0, size_clk_src = 0, entry_offset = 5;
+
+	struct platform_device *pdev;
+	struct mmrm_vm_fe_clk_src_info *pclk_src;
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+
+	pdev = container_of(fe_data->dev, struct platform_device, dev);
+
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &size_clk_src);
+	if ((size_clk_src < sizeof(*fe_data->clk_src_set.clk_src_tbl)) ||
+		(size_clk_src % sizeof(*fe_data->clk_src_set.clk_src_tbl))) {
+		d_mpr_e("%s: invalid size(%d) of clk src table\n",
+			__func__, size_clk_src);
+		fe_data->clk_src_set.count = 0;
+		rc = -EINVAL;
+		goto err_load_clk_src_tbl;
+	}
+
+	fe_data->clk_src_set.clk_src_tbl = devm_kzalloc(&pdev->dev,
+			size_clk_src, GFP_KERNEL);
+
+	if (!fe_data->clk_src_set.clk_src_tbl) {
+		d_mpr_e("%s: failed to allocate memory for clk_src_tbl\n",
+				__func__);
+		rc = -ENOMEM;
+		goto err_load_clk_src_tbl;
+	}
+	num_clk_src = size_clk_src / sizeof(struct mmrm_clk_client_desc);
+	fe_data->clk_src_set.count = num_clk_src;
+
+	d_mpr_h("%s: found %d clk_srcs size %d\n",
+			__func__, num_clk_src, size_clk_src);
+
+	for (c = 0; c < num_clk_src; c++) {
+		pclk_src = &fe_data->clk_src_set.clk_src_tbl[c];
+
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset),
+			&pclk_src->client_desc.client_domain);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+1),
+			&pclk_src->client_desc.client_id);
+	}
+
+	return 0;
+
+err_load_clk_src_tbl:
+	return rc;
+}
+
+void init_lookup_table(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int  i;
+	struct platform_device *pdev;
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+
+	pdev = container_of(fe_data->dev, struct platform_device, dev);
+	mmrm_vm_fe_load_clk_rsrc(mmrm_vm);
+
+	fe_data->client_tbl = devm_kzalloc(&pdev->dev,
+			fe_data->clk_src_set.count * sizeof(struct mmrm_client), GFP_KERNEL);
+	if (!fe_data->client_tbl)
+		return;
+
+	for (i = 0; i < fe_data->clk_src_set.count; i++) {
+		fe_data->client_tbl[i].client_type = 0;
+		fe_data->client_tbl[i].client_uid = 0;
+	}
+}
+
+struct mmrm_client *mmrm_vm_fe_get_client(u32 client_id)
+{
+	int i;
+	struct mmrm_client *ptr;
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+
+	for (i = 0, ptr = fe_data->client_tbl; i < fe_data->clk_src_set.count; i++, ptr++) {
+		if (ptr->client_uid == client_id+1)
+			return ptr;
+	}
+
+	for (i = 0, ptr = fe_data->client_tbl; i < fe_data->clk_src_set.count; i++, ptr++) {
+		if (ptr->client_uid == 0) {
+			ptr->client_uid = client_id + 1;
+			return ptr;
+		}
+	}
+	return NULL;
+}
+
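
A note on the DT parsing in mmrm_vm_fe_load_clk_rsrc() above: "mmrm-client-info" is read as a flat array of u32 cells with five cells per client entry (entry_offset = 5), and only the first two cells of each entry (client_domain, client_id) are consumed by the FE. The sketch below restates that indexing in isolation; the flattened cells[] view and the helper name are illustrative assumptions, not part of this change.

/*
 * Illustrative only: the per-entry cell indexing used for
 * "mmrm-client-info", assuming the property has already been read
 * into a flat u32 array of num_entries * 5 cells.
 */
static void example_fill_clk_src_tbl(const u32 *cells, u32 num_entries,
		struct mmrm_vm_fe_clk_src_info *tbl)
{
	const int entry_offset = 5;	/* five u32 cells per client entry */
	u32 c;

	for (c = 0; c < num_entries; c++) {
		tbl[c].client_desc.client_domain = cells[c * entry_offset];
		tbl[c].client_desc.client_id = cells[c * entry_offset + 1];
		/* the remaining three cells of each entry are not used by the FE */
	}
}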

+ 140 - 0
vm/fe/src/mmrm_vm_fe_main.c

@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_debug.h"
+
+struct mmrm_vm_driver_data *drv_vm_fe = (void *) -EPROBE_DEFER;
+
+ssize_t msgq_send_trigger_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct mmrm_vm_driver_data *priv = dev->driver_data;
+	char send_buf[64] = "test msg";
+	char recv_buf[64];
+	int ret;
+	bool flag;
+	size_t recv_size;
+
+	ret = strtobool(buf, &flag);
+	if (ret) {
+		dev_err(dev, "invalid user input\n");
+		return ret;
+	}
+	if (flag) {
+		ret = gh_msgq_send(priv->msg_info.msgq_handle, send_buf, sizeof(send_buf), 0);
+		if (ret)
+			dev_err(dev, "send msgq failed\n");
+		else
+			dev_info(dev, "send msgq success\n");
+		ret = gh_msgq_recv(priv->msg_info.msgq_handle, recv_buf, sizeof(recv_buf),
+			&recv_size, 0);
+		if (ret)
+			dev_err(dev, "recv msgq failed ret = %d\n", ret);
+		else
+			dev_info(dev, "recv msg: %s\n", recv_buf);
+	}
+	return ret ? ret : count;
+}
+
+static DEVICE_ATTR_WO(msgq_send_trigger);
+
+static struct attribute *mmrm_vm_fe_fs_attrs[] = {
+	&dev_attr_msgq_send_trigger.attr,
+	NULL,
+};
+
+static struct attribute_group mmrm_vm_fe_fs_attrs_group = {
+	.attrs = mmrm_vm_fe_fs_attrs,
+};
+
+static int mmrm_vm_fe_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mmrm_vm_fe_priv *fe_priv_data;
+	int rc = 0;
+
+	drv_vm_fe = devm_kzalloc(dev, sizeof(*drv_vm_fe), GFP_KERNEL);
+	if (!drv_vm_fe)
+		return -ENOMEM;
+
+	fe_priv_data = devm_kzalloc(dev, sizeof(*fe_priv_data), GFP_KERNEL);
+	if (!fe_priv_data) {
+		rc = -ENOMEM;
+		goto err_priv_data;
+	}
+
+	drv_vm_fe->vm_pvt_data = fe_priv_data;
+	fe_priv_data->seq_no = 0;
+	fe_priv_data->dev = dev;
+
+	mutex_init(&fe_priv_data->msg_send_lock);
+	dev_set_drvdata(&pdev->dev, drv_vm_fe);
+
+	INIT_LIST_HEAD(&fe_priv_data->mmrm_work_list);
+	mutex_init(&fe_priv_data->work_list_lock);
+
+	mmrm_vm_fe_load_clk_rsrc(drv_vm_fe);
+	mmrm_vm_msgq_init(drv_vm_fe);
+
+	if (sysfs_create_group(&pdev->dev.kobj, &mmrm_vm_fe_fs_attrs_group)) {
+		d_mpr_e("%s: failed to create sysfs\n",
+			__func__);
+	}
+
+	dev_info(dev, "mmrm vm fe probe success\n");
+err_priv_data:
+
+	return rc;
+}
+
+static int mmrm_vm_fe_driver_remove(struct platform_device *pdev)
+{
+	struct mmrm_vm_driver_data *mmrm_vm = dev_get_drvdata(&pdev->dev);
+
+	mmrm_vm_msgq_deinit(mmrm_vm);
+	return 0;
+}
+
+static const struct of_device_id mmrm_vm_fe_match[] = {
+	{ .compatible = "qcom,mmrm-vm-fe" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mmrm_vm_fe_match);
+
+static struct platform_driver mmrm_vm_fe_driver = {
+	.probe = mmrm_vm_fe_driver_probe,
+	.driver = {
+		.name = "mmrm-vm-fe",
+		.of_match_table = mmrm_vm_fe_match,
+	},
+	.remove = mmrm_vm_fe_driver_remove,
+};
+
+static int __init mmrm_vm_fe_module_init(void)
+{
+	d_mpr_e("%s:  init start\n", __func__);
+
+	return platform_driver_register(&mmrm_vm_fe_driver);
+}
+subsys_initcall(mmrm_vm_fe_module_init);
+
+static void __exit mmrm_vm_fe_module_exit(void)
+{
+	platform_driver_unregister(&mmrm_vm_fe_driver);
+}
+module_exit(mmrm_vm_fe_module_exit);
+
+MODULE_SOFTDEP("pre: gunyah_transport");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MMRM VM FrontEnd Driver");
+MODULE_LICENSE("GPL v2");

+ 151 - 0
vm/fe/src/mmrm_vm_fe_msgq.c

@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+#include <mmrm_vm_interface.h>
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_debug.h"
+
+void mmrm_vm_fe_msgq_msg_handler(struct work_struct *work)
+{
+	struct mmrm_vm_thread_info *pthread_info =
+		container_of(work, struct mmrm_vm_thread_info, msgq_work.work);
+	struct mmrm_vm_driver_data *mmrm_vm =
+		container_of(pthread_info, struct mmrm_vm_driver_data, thread_info);
+	struct list_head head;
+	struct mmrm_vm_msg *next_msg;
+	struct mmrm_vm_msg *msg;
+
+	mutex_lock(&pthread_info->list_lock);
+	list_replace_init(&pthread_info->queued_msg, &head);
+	mutex_unlock(&pthread_info->list_lock);
+
+	list_for_each_entry_safe(msg, next_msg, &head, link) {
+		mmrm_vm_fe_recv(mmrm_vm, msg->msg_buf, msg->msg_size);
+		list_del(&msg->link);
+		kfree(msg);
+	}
+}
+
+int mmrm_vm_msgq_listener(void *data)
+{
+	struct mmrm_vm_driver_data *mmrm_vm = (struct mmrm_vm_driver_data *)data;
+
+	struct mmrm_vm_gh_msgq_info *pmsg_info = &mmrm_vm->msg_info;
+	struct mmrm_vm_thread_info *pthread_info = &mmrm_vm->thread_info;
+	size_t size;
+	int ret = 0;
+	struct mmrm_vm_msg *msg;
+
+	while (true) {
+		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+		if (!msg)
+			return -ENOMEM;
+
+		ret = gh_msgq_recv(pmsg_info->msgq_handle, msg->msg_buf,
+				GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
+
+		if (ret < 0) {
+			kfree(msg);
+			d_mpr_e("gh_msgq_recv failed, rc=%d\n", ret);
+			return -EINVAL;
+		}
+
+		msg->msg_size = size;
+		mutex_lock(&pthread_info->list_lock);
+		list_add_tail(&msg->link, &pthread_info->queued_msg);
+		mutex_unlock(&pthread_info->list_lock);
+
+		queue_delayed_work(pthread_info->msg_workq,
+				 &pthread_info->msgq_work, msecs_to_jiffies(0));
+	}
+	return 0;
+}
+
+int mmrm_vm_msgq_send(struct mmrm_vm_driver_data *mmrm_vm, void *msg, size_t msg_size)
+{
+	int  rc;
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	pmsg_info = &mmrm_vm->msg_info;
+
+	if (!pmsg_info->msgq_handle) {
+		d_mpr_e("Failed to send msg, invalid msgq handle\n");
+		return -EINVAL;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		d_mpr_e("msg size unsupported for msgq: %zu > %d\n", msg_size,
+			GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		return -E2BIG;
+	}
+
+	rc = gh_msgq_send(pmsg_info->msgq_handle, msg, msg_size, 0);
+	d_mpr_e("%s: handle=%p result:%d\n", __func__, pmsg_info->msgq_handle, rc);
+
+	return rc;
+}
+
+int mmrm_vm_msgq_init(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int rc = 0;
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+	struct mmrm_vm_thread_info *pthread_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	pmsg_info = &mmrm_vm->msg_info;
+	pthread_info = &mmrm_vm->thread_info;
+
+
+	pthread_info->msg_workq = create_singlethread_workqueue("vm_fe_message_workq");
+	INIT_DELAYED_WORK(&pthread_info->msgq_work, mmrm_vm_fe_msgq_msg_handler);
+
+	/* the listener thread and work handler share this list; initialize before they start */
+	mutex_init(&pthread_info->list_lock);
+	INIT_LIST_HEAD(&pthread_info->queued_msg);
+
+	pmsg_info->msgq_label = GH_MSGQ_LABEL_MMRM;
+	pmsg_info->msgq_handle = gh_msgq_register(pmsg_info->msgq_label);
+	d_mpr_h("%s: label:%d handle:%p\n", __func__,
+		pmsg_info->msgq_label, pmsg_info->msgq_handle);
+
+	if (IS_ERR(pmsg_info->msgq_handle)) {
+		rc = PTR_ERR(pmsg_info->msgq_handle);
+		d_mpr_e("msgq register failed rc:%d\n", rc);
+		return rc;
+	}
+
+	pthread_info->msgq_listener_thread =
+			kthread_create(mmrm_vm_msgq_listener, mmrm_vm, "mmrm_vm_fe");
+	wake_up_process(pthread_info->msgq_listener_thread);
+
+	d_mpr_e("%s:  msgq_handle=%p\n", __func__, pmsg_info->msgq_handle);
+
+	return rc;
+}
+
+int mmrm_vm_msgq_deinit(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+	struct mmrm_vm_thread_info *pthread_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	pmsg_info = &mmrm_vm->msg_info;
+	pthread_info = &mmrm_vm->thread_info;
+
+	if (pthread_info->msgq_listener_thread)
+		kthread_stop(pthread_info->msgq_listener_thread);
+
+	if (pmsg_info->msgq_handle)
+		gh_msgq_unregister(pmsg_info->msgq_handle);
+	return 0;
+}