View Source

Add 'qcom/opensource/mmrm-driver/' from commit '35211a9255b69d52ddc2b82dc8b1dd2840b98868'

git-subtree-dir: qcom/opensource/mmrm-driver
git-subtree-mainline: f9b254670f97565bd16e857ec349fe645c947685
git-subtree-split: 35211a9255b69d52ddc2b82dc8b1dd2840b98868
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/mmrm-driver
tag: VIDEO.LA.4.0.r2-06100-lanai.0
David Wronek, 5 months ago
parent
commit
99d06628db
45 files changed, 7155 additions and 0 deletions
  1. 76 0
      qcom/opensource/mmrm-driver/Android.mk
  2. 30 0
      qcom/opensource/mmrm-driver/BUILD.bazel
  3. 22 0
      qcom/opensource/mmrm-driver/Kbuild
  4. 16 0
      qcom/opensource/mmrm-driver/Makefile
  5. 4 0
      qcom/opensource/mmrm-driver/config/waipiommrm.conf
  6. 9 0
      qcom/opensource/mmrm-driver/config/waipiommrmconf.h
  7. 11 0
      qcom/opensource/mmrm-driver/driver/Kbuild
  8. 123 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr.c
  9. 195 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr.h
  10. 47 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr_cxipeak.c
  11. 1353 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr_sw.c
  12. 77 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_debug.c
  13. 72 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_debug.h
  14. 68 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_fixedpoint.h
  15. 162 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_internal.c
  16. 60 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_internal.h
  17. 286 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_res_parse.c
  18. 47 0
      qcom/opensource/mmrm-driver/driver/src/mmrm_resources.h
  19. 617 0
      qcom/opensource/mmrm-driver/driver/src/msm_mmrm.c
  20. 19 0
      qcom/opensource/mmrm-driver/mmrm_kernel_board.mk
  21. 12 0
      qcom/opensource/mmrm-driver/mmrm_kernel_product.mk
  22. 22 0
      qcom/opensource/mmrm-driver/mmrm_modules.bzl
  23. 83 0
      qcom/opensource/mmrm-driver/mmrm_modules_build.bzl
  24. 11 0
      qcom/opensource/mmrm-driver/target.bzl
  25. 9 0
      qcom/opensource/mmrm-driver/vm/be/Kbuild
  26. 32 0
      qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be.h
  27. 9 0
      qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be.rc
  28. 292 0
      qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_dispatch.c
  29. 112 0
      qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_main.c
  30. 317 0
      qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_msgq.c
  31. 63 0
      qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_debug.h
  32. 246 0
      qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_interface.h
  33. 104 0
      qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_msgq.h
  34. 44 0
      qcom/opensource/mmrm-driver/vm/common/src/mmrm_vm_debug.c
  35. 15 0
      qcom/opensource/mmrm-driver/vm/fe/Kbuild
  36. 16 0
      qcom/opensource/mmrm-driver/vm/fe/src/Makefile.am
  37. 121 0
      qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe.h
  38. 320 0
      qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_api.c
  39. 263 0
      qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_frontend.c
  40. 224 0
      qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_main.c
  41. 182 0
      qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_msgq.c
  42. 9 0
      qcom/opensource/mmrm-driver/vm/fe/vm_test/Kbuild
  43. 892 0
      qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_internal.c
  44. 66 0
      qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_internal.h
  45. 397 0
      qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_main.c

+ 76 - 0
qcom/opensource/mmrm-driver/Android.mk

@@ -0,0 +1,76 @@
+# Decide whether to build the MMRM DLKM: enabled by default, disabled when
+# kernel DLKMs are globally off unless an explicit MMRM override is set.
+TARGET_MMRM_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_MMRM_OVERRIDE),true)
+		TARGET_MMRM_ENABLE := true
+	endif
+else
+TARGET_MMRM_ENABLE := true
+endif
+
+ifeq ($(TARGET_MMRM_ENABLE),true)
+MMRM_BLD_DIR := $(shell pwd)/vendor/qcom/opensource/mmrm-driver
+
+# Build msm-mmrm.ko
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := MMRM_ROOT=$(MMRM_BLD_DIR)
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+###########################################################
+
+DLKM_DIR   := device/qcom/common/dlkm
+
+LOCAL_PATH := $(call my-dir)
+LOCAL_MODULE_DDK_BUILD    := true
+LOCAL_MODULE_KO_DIRS      := driver/msm-mmrm.ko
+
+# Target 1: export Module.symvers so dependent modules can link against mmrm.
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := mmrm-module-symvers
+LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+# Include kp_module.ko in the /vendor/lib/modules (vendor.img)
+# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+# Target 2: the mmrm driver module itself.
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := msm-mmrm.ko
+LOCAL_MODULE_KBUILD_NAME  := driver/msm-mmrm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+# Include kp_module.ko in the /vendor/lib/modules (vendor.img)
+# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+# Optional test module target, intentionally disabled (kept for reference).
+# include $(CLEAR_VARS)
+# # For incremental compilation
+# LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+# LOCAL_MODULE              := mmrm_test_module.ko
+# LOCAL_MODULE_KBUILD_NAME  := test/mmrm_test_module.ko
+# LOCAL_MODULE_TAGS         := optional
+# LOCAL_MODULE_DEBUG_ENABLE := true
+# LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+# # Include kp_module.ko in the /vendor/lib/modules (vendor.img)
+# # BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+# include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+# Target 3: VM backend module, built only when MMRM VM support is configured.
+ifeq ($(CONFIG_MSM_MMRM_VM),y)
+	include $(CLEAR_VARS)
+	# For incremental compilation
+	LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+	LOCAL_MODULE              := mmrm_vm_be.ko
+	LOCAL_MODULE_KBUILD_NAME  := vm/be/mmrm_vm_be.ko
+	LOCAL_MODULE_TAGS         := optional
+	LOCAL_MODULE_DEBUG_ENABLE := true
+	LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+	LOCAL_INIT_RC             := vm/be/src/mmrm_vm_be.rc
+	LOCAL_C_INCLUDES          := vm/common/inc/
+	# Include kp_module.ko in the /vendor/lib/modules (vendor.img)
+	# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+	include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif
+endif

+ 30 - 0
qcom/opensource/mmrm-driver/BUILD.bazel

@@ -0,0 +1,30 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+# Fix: load() statements must appear at the top of a BUILD file, before any
+# other statement; this one was previously placed after the rule definitions.
+load(":target.bzl", "define_pineapple")
+
+package(
+    default_visibility = [
+        "//visibility:public"],
+)
+
+# DDK header targets exported to dependent kernel-module packages.
+ddk_headers(
+    name = "mmrm_drivers_configs",
+    hdrs  = glob([
+        "config/*.h",
+    ]),
+    includes = ["config"]
+)
+
+ddk_headers(
+    name = "driver_headers",
+    hdrs = glob([
+      "driver/src/*.h",
+    ]),
+    includes = ["driver/src"]
+)
+
+# Aggregate of all mmrm headers; ":" added to driver_headers for label
+# consistency (same-package labels resolve identically either way).
+ddk_headers(
+    name = "mmrm_driver_headers",
+    hdrs = [":mmrm_drivers_configs", ":driver_headers"]
+)
+
+define_pineapple()

+ 22 - 0
qcom/opensource/mmrm-driver/Kbuild

@@ -0,0 +1,22 @@
+# Top-level Kbuild: pulls in the waipio MMRM config and selects which
+# sub-directories to build depending on whether this is the QTI VM.
+
+include $(MMRM_ROOT)/config/waipiommrm.conf
+LINUXINCLUDE += -include $(MMRM_ROOT)/config/waipiommrmconf.h
+
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+
+# Host build: core mmrm driver (and the VM backend when enabled below).
+obj-m += driver/
+# obj-m += test/
+
+ifeq ($(CONFIG_MSM_MMRM_VM),y)
+LINUXINCLUDE += -I$(MMRM_ROOT)/vm/common/inc/
+obj-m += vm/be/
+endif
+
+else
+
+# QTI VM build: frontend driver plus its built-in test.
+LINUXINCLUDE += -I$(MMRM_ROOT)/vm/common/inc/
+
+obj-m += vm/fe/
+obj-y += vm/fe/vm_test/
+
+endif

+ 16 - 0
qcom/opensource/mmrm-driver/Makefile

@@ -0,0 +1,16 @@
+# Out-of-tree module Makefile: delegates to the kernel build system at
+# KERNEL_SRC with this directory as the external module dir (M).
+KBUILD_OPTIONS+= MMRM_ROOT=$(KERNEL_SRC)/$(M)
+
+all: modules
+
+modules:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+# Forward any other goal to the kernel build system.
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 4 - 0
qcom/opensource/mmrm-driver/config/waipiommrm.conf

@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2020, The Linux Foundation. All rights reserved.
+# Kbuild config symbols for the waipio target: core MMRM on, VM support off.
+export CONFIG_MSM_MMRM=y
+export CONFIG_MSM_MMRM_VM=n

+ 9 - 0
qcom/opensource/mmrm-driver/config/waipiommrmconf.h

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Compile-time config mirror of waipiommrm.conf, force-included via
+ * LINUXINCLUDE in the top-level Kbuild.
+ */
+
+#define CONFIG_MSM_MMRM 1
+
+// To activate mmrm para-virtualization, uncomment the line below.
+//#define CONFIG_MSM_MMRM_VM 1

+ 11 - 0
qcom/opensource/mmrm-driver/driver/Kbuild

@@ -0,0 +1,11 @@
+# Build msm-mmrm.ko from the core driver sources when CONFIG_MSM_MMRM is set.
+ifdef CONFIG_MSM_MMRM
+LINUXINCLUDE += -I$(MMRM_ROOT)/driver/src
+
+obj-m += msm-mmrm.o
+msm-mmrm-objs := src/msm_mmrm.o \
+				src/mmrm_internal.o \
+				src/mmrm_res_parse.o \
+				src/mmrm_debug.o \
+				src/mmrm_clk_rsrc_mgr_sw.o \
+				src/mmrm_clk_rsrc_mgr.o
+endif

+ 123 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr.c

@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "mmrm_clk_rsrc_mgr.h"
+#include "mmrm_debug.h"
+
+/* Ops for the SW clk manager scheme — the only scheme wired up here. */
+static struct mmrm_clk_mgr_ops sw_clk_mgr_ops = {
+	.init_clk_mgr = mmrm_init_sw_clk_mgr,
+	.destroy_clk_mgr = mmrm_destroy_sw_clk_mgr,
+};
+
+/*
+ * mmrm_get_clk_mgr_ops - install the clk manager ops matching the scheme
+ * read from platform resources into drv_data->clk_mgr_ops.
+ * Returns 0 on success, -EINVAL for cxipeak (unsupported) or an unknown
+ * scheme.
+ */
+int mmrm_get_clk_mgr_ops(void *driver_data)
+{
+	int rc = 0;
+	struct mmrm_driver_data *drv_data =
+		(struct mmrm_driver_data *)driver_data;
+
+	if (drv_data->clk_res.scheme == CLK_MGR_SCHEME_SW) {
+		drv_data->clk_mgr_ops = &sw_clk_mgr_ops;
+	} else if (drv_data->clk_res.scheme == CLK_MGR_SCHEME_CXIPEAK) {
+		d_mpr_e("%s: cxipeak is not supported with mmrm\n", __func__);
+		rc = -EINVAL;
+	} else {
+		d_mpr_e("%s: unsupported clk mgr scheme\n", __func__);
+		/* bug fix: previously jumped to the error exit with rc still
+		 * 0, so an unknown scheme was reported as success.
+		 */
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Register a clock client with the active clk manager.
+ * Validates the manager and its client ops, then delegates to the scheme's
+ * clk_client_reg. Returns the client handle, or NULL on invalid state.
+ */
+struct mmrm_client *mmrm_clk_client_register(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client_desc *client_desc)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_reg) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return NULL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_reg(clk_mgr,
+			client_desc->client_info.desc,
+			client_desc->priority,
+			client_desc->pvt_data,
+			client_desc->notifier_callback_fn);
+}
+
+/*
+ * Deregister a clock client. Delegates to the scheme's clk_client_dereg;
+ * returns -EINVAL if the manager or its ops are not set up.
+ */
+int mmrm_clk_client_deregister(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_dereg) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_dereg(clk_mgr, client);
+}
+
+
+/*
+ * Set a client's clock rate to an exact value via the scheme's
+ * clk_client_setval op. Returns -EINVAL if the manager/ops are missing.
+ */
+int mmrm_clk_client_setval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	unsigned long val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_setval) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_setval(
+		clk_mgr, client, client_data, val);
+}
+
+/*
+ * Set a client's clock rate within a min/max range via the scheme's
+ * clk_client_setval_inrange op. Returns -EINVAL on invalid manager state.
+ */
+int mmrm_clk_client_setval_inrange(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_setval_inrange) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_setval_inrange(
+		clk_mgr, client, client_data, val);
+}
+
+/*
+ * Read back a client's current resource value via the scheme's
+ * clk_client_getval op. Returns -EINVAL on invalid manager state.
+ */
+int mmrm_clk_client_getval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_getval) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_getval(
+		clk_mgr, client, val);
+}
+
+/*
+ * Format info about currently enabled clients into buf (at most sz bytes)
+ * via the scheme's clk_print_enabled_client_info op.
+ * Returns -EINVAL on invalid manager state.
+ */
+int mmrm_clk_print_enabled_client_info(struct mmrm_clk_mgr *clk_mgr,
+	char *buf, int sz)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_print_enabled_client_info) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_print_enabled_client_info(
+		clk_mgr, buf, sz);
+}
+

+ 195 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr.h

@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _MMRM_CLK_RESOURCE_MGR_H_
+#define _MMRM_CLK_RESOURCE_MGR_H_
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+#include "mmrm_internal.h"
+/* Maximum number of clients tracked for throttling. */
+#define MMRM_MAX_THROTTLE_CLIENTS 5
+
+/* Clock-manager scheme; cxipeak is rejected as unsupported at init. */
+enum mmrm_clk_mgr_scheme {
+	CLK_MGR_SCHEME_SW,
+	CLK_MGR_SCHEME_CXIPEAK
+};
+
+/* Voltage corners tracked by the SW clk manager, ordered low to high. */
+enum mmrm_sw_vdd_levels {
+	MMRM_VDD_LEVEL_LOW_SVS,
+	MMRM_VDD_LEVEL_SVS,
+	MMRM_VDD_LEVEL_SVS_L1,
+	MMRM_VDD_LEVEL_NOM,
+	MMRM_VDD_LEVEL_TURBO,
+	MMRM_VDD_LEVEL_MAX
+};
+
+/*
+ * Map each mmrm_sw_vdd_levels entry to its RPMh regulator corner value.
+ * Made const: this is a read-only lookup table; a non-const static array
+ * defined in a header gives every including translation unit its own
+ * writable copy.
+ */
+static const int mmrm_sw_vdd_corner[] = {
+	[MMRM_VDD_LEVEL_LOW_SVS] = RPMH_REGULATOR_LEVEL_LOW_SVS,
+	[MMRM_VDD_LEVEL_SVS] = RPMH_REGULATOR_LEVEL_SVS,
+	[MMRM_VDD_LEVEL_SVS_L1] = RPMH_REGULATOR_LEVEL_SVS_L1,
+	[MMRM_VDD_LEVEL_NOM] = RPMH_REGULATOR_LEVEL_NOM,
+	[MMRM_VDD_LEVEL_TURBO] = RPMH_REGULATOR_LEVEL_TURBO
+};
+
+/* Upper bound on the number of clock clients the SW manager can hold. */
+#define MMRM_SW_CLIENTS_NUM_MAX 35
+/* Runtime toggle for the throttle feature (defined elsewhere in the driver). */
+extern u8 msm_mmrm_enable_throttle_feature;
+/* Client callback invoked to notify value/throttle events. */
+typedef int (*notifier_callback_fn_t)(
+	struct mmrm_client_notifier_data *notifier_data);
+
+/* Per-client entry in the SW clk manager's client table. */
+struct mmrm_sw_clk_client_tbl_entry {
+	char name[MMRM_CLK_CLIENT_NAME_SIZE];
+	struct clk *clk;
+	enum mmrm_client_priority pri;
+	void *pvt_data; /* client user data */
+	notifier_callback_fn_t notifier_cb_fn;
+
+	/* prepared internally */
+	u32 clk_src_id; /* (client_domain << 16) | client_id */
+	bool pass_through;
+	u32 min_level;
+	u32 max_level;
+	u32 max_num_hw_blocks;
+	u64 freq[MMRM_VDD_LEVEL_MAX]; /* max clk rate per vdd level */
+	u32 dyn_pwr[MMRM_VDD_LEVEL_MAX];
+	u32 leak_pwr[MMRM_VDD_LEVEL_MAX];
+	/* current draw when running at level [i] while rail is at level [j] */
+	u32 current_ma[MMRM_VDD_LEVEL_MAX][MMRM_VDD_LEVEL_MAX];
+
+	/* reference to this entry */
+	struct mmrm_client *client;
+
+	/* configured clk rate */
+	u64 clk_rate;
+	u32 vdd_level;
+	bool reserve;
+	u32 ref_count; /* registrations outstanding for this entry */
+	u32 num_hw_blocks;
+
+	/* CRM Data */
+	bool is_crm_client;
+	u32 hw_drv_instances;
+	u32 num_pwr_states;
+	u32 max_rate_idx; /* index into crm_client_tbl holding clk_rate */
+	u32 crm_client_tbl_size;
+	/*
+	 * CRM client vote table
+	 * size = (number of HW clients * supported power states + 1)
+	 *    Example table of a client with 1 SW client, 3 HW clients where
+	 *    each HW client supports 2 power states:
+	 *    index 0: HW_0 PW_ST_0
+	 *    index 1: HW_0 PW_ST_1
+	 *    index 2: HW_1 PW_ST_0
+	 *    index 3: HW_1 PW_ST_1
+	 *    index 4: HW_2 PW_ST_0
+	 *    index 5: HW_2 PW_ST_1
+	 *    index 6: SW_CLIENT
+	 */
+	u64 *crm_client_tbl;
+};
+
+/* Node on the throttled-clients list, recording state to restore later. */
+struct mmrm_sw_throttled_clients_data {
+	struct list_head  list;
+	u32 table_id;
+	u32 delta_cu_ma;
+	u32 prev_vdd_level;
+};
+
+/* Aggregated peak-current bookkeeping for the SW manager. */
+struct mmrm_sw_peak_current_data {
+	/* peak current data in ma */
+	u32 threshold;
+	/* current in ma */
+	u32 aggreg_val;
+	/* mmcx voltage level */
+	u32 aggreg_level;
+};
+
+/* Identifies a client that may be throttled. */
+struct mmrm_throttle_info {
+	u32 csid_throttle_client;
+	u16 tbl_entry_id;
+};
+
+/* State owned by the SW clk manager scheme. */
+struct mmrm_sw_clk_mgr_info {
+	void *driver_data; /* back-pointer to struct mmrm_driver_data */
+
+	/* client data */
+	struct mmrm_sw_clk_client_tbl_entry *clk_client_tbl;
+	u32 tot_clk_clients;
+	u32 enabled_clk_clients;
+	struct mmrm_throttle_info throttle_clients_info[MMRM_MAX_THROTTLE_CLIENTS];
+	u16 throttle_clients_data_length;
+
+	/* peak current data */
+	struct mmrm_sw_peak_current_data peak_cur_data;
+
+	/* HEAD of list of clients throttled */
+	struct list_head throttled_clients;
+
+};
+
+/* Top-level clk manager: scheme selection plus scheme-specific data. */
+struct mmrm_clk_mgr {
+	struct mutex lock; /* serializes all client table operations */
+	enum mmrm_clk_mgr_scheme scheme;
+	union {
+		struct mmrm_sw_clk_mgr_info sw_info;
+	} data;
+	struct mmrm_clk_mgr_client_ops *clk_client_ops;
+};
+
+/* Per-scheme client operations; the dispatch wrappers below call these. */
+struct mmrm_clk_mgr_client_ops {
+	/* client ops */
+	struct mmrm_client*(*clk_client_reg)(
+		struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_clk_client_desc clk_desc,
+		enum mmrm_client_priority priority, void *pvt_data,
+		notifier_callback_fn_t nt_fn_cb);
+	int (*clk_client_dereg)(
+		struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client);
+	int (*clk_client_setval)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data, unsigned long val);
+	int (*clk_client_setval_inrange)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data,
+		struct mmrm_client_res_value *val);
+	int (*clk_client_getval)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client, struct mmrm_client_res_value *val);
+	int (*clk_print_enabled_client_info)(struct mmrm_clk_mgr *clk_mgr,
+		char *buf, int sz);
+};
+
+/* clk mgr operations (lifecycle: create/destroy the scheme instance) */
+struct mmrm_clk_mgr_ops {
+	int (*init_clk_mgr)(void *drv_data);
+	int (*destroy_clk_mgr)(struct mmrm_clk_mgr *sw_clk_mgr);
+};
+int mmrm_get_clk_mgr_ops(void *drv_data);
+
+/* clk mgr client operations: scheme-agnostic dispatch wrappers */
+struct mmrm_client *mmrm_clk_client_register(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client_desc *client_desc);
+int mmrm_clk_client_deregister(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client);
+int mmrm_clk_client_setval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	unsigned long val);
+int mmrm_clk_client_setval_inrange(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val);
+int mmrm_clk_client_getval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val);
+int mmrm_clk_print_enabled_client_info(struct mmrm_clk_mgr *clk_mgr,
+	char *buf,
+	int sz);
+
+/* sw clk mgr specific (implemented in mmrm_clk_rsrc_mgr_sw.c) */
+int mmrm_init_sw_clk_mgr(void *driver_data);
+int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr);
+
+#endif //_MMRM_CLK_RESOURCE_MGR_H_

+ 47 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr_cxipeak.c

@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Stub client ops for the cxipeak scheme, which is not supported
+ * (mmrm_get_clk_mgr_ops rejects it). These exist only as placeholders.
+ * NOTE(review): this file has no #includes, so it cannot compile stand-alone;
+ * confirm it is intentionally excluded from the Kbuild object list.
+ */
+struct mmrm_client *mmrm_cxipeak_clk_client_register(
+	struct mmrm_clk_client_desc clk_desc,
+	enum mmrm_client_priority priority, void *pvt_data,
+	notifier_callback_fn_t not_fn_cb)
+{
+	return NULL;
+}
+
+int mmrm_cxipeak_clk_client_deregister(struct mmrm_client *client)
+{
+	return 0;
+}
+
+int mmrm_cxipeak_clk_client_set_value(
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	return 0;
+}
+
+int mmrm_cxipeak_clk_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	return 0;
+}
+
+/*
+ * Stub ops table for the (unsupported) cxipeak scheme.
+ * Fix: the setval/getval initializers previously referenced
+ * mmrm_cxipeak_clk_client_setval / _getval, which are not defined anywhere;
+ * the functions defined above are named _set_value / _get_value.
+ * NOTE(review): the stub signatures also omit the clk_mgr parameter that
+ * struct mmrm_clk_mgr_client_ops expects — confirm before ever compiling
+ * this scheme in.
+ */
+static struct mmrm_clk_mgr_client_ops clk_client_cxipeakops = {
+	.clk_client_reg = mmrm_cxipeak_clk_client_register,
+	.clk_client_dereg = mmrm_cxipeak_clk_client_deregister,
+	.clk_client_setval = mmrm_cxipeak_clk_client_set_value,
+	.clk_client_getval = mmrm_cxipeak_clk_client_get_value,
+};
+
+
+/* Placeholder init for the unsupported cxipeak scheme; always succeeds. */
+int mmrm_init_cxipeak_clk_mgr(void *driver_data)
+{
+	return 0;
+}
+
+/* Placeholder teardown for the unsupported cxipeak scheme. */
+int mmrm_destroy_cxipeak_clk_mgr(struct mmrm_clk_mgr *cxipeak_clk_mgr)
+{
+	return 0;
+}

+ 1353 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_clk_rsrc_mgr_sw.c

@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/slab.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/clk.h>
+#include <linux/clk/qcom.h>
+
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+#include "mmrm_fixedpoint.h"
+
+/* Integer part of a Q16.16 fixed-point value. */
+#define Q16_INT(q) ((q) >> 16)
+/* Fractional part of a Q16.16 value, scaled to a 0-99 percentage. */
+#define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)
+/* Step (Hz) used when walking a clock's rate range. */
+#define CLK_RATE_STEP 1000000
+#define NOTIFY_TIMEOUT 100000000
+/* Max HW DRV Instances (inst 0-5)*/
+#define MAX_HW_DRV_INSTANCES 6
+/* Max power states per HW DRV instance (states 0-4)*/
+#define MAX_POWER_STATES 5
+
+/*
+ * Populate tbl_entry->freq[]: the highest supported clock rate for each
+ * vdd level. Walks the clock's rate range in CLK_RATE_STEP increments,
+ * mapping each rounded rate to a voltage corner via qcom_clk_get_voltage().
+ * Always returns 0 (rc is never set to an error here).
+ */
+static int mmrm_sw_update_freq(
+	struct mmrm_sw_clk_mgr_info *sinfo, struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
+{
+	int rc = 0;
+	u32 i;
+	struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct voltage_corner_set *cset = &cres->corner_set;
+	long clk_val_min, clk_val_max, clk_val, clk_val_round;
+	int voltage_corner;
+
+	clk_val_min = clk_round_rate(tbl_entry->clk, 1);
+	clk_val_max = clk_round_rate(tbl_entry->clk, ~0UL);
+	/* bug fix: clk_val_min/max are long — print with %ld, not %llu */
+	d_mpr_h("%s: csid(0x%x): min_clk_rate(%ld) max_clk_rate(%ld)\n",
+		__func__,
+		tbl_entry->clk_src_id,
+		clk_val_min,
+		clk_val_max);
+
+	/* init with min val */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		tbl_entry->freq[i] = clk_val_min;
+	}
+
+	/* step through rates */
+	for (clk_val = clk_val_min; clk_val < clk_val_max; clk_val += CLK_RATE_STEP) {
+		/* get next clk rate */
+		clk_val_round = clk_round_rate(tbl_entry->clk, clk_val);
+		if (clk_val_round > clk_val_min) {
+			clk_val_min = clk_val_round;
+
+			/* Get voltage corner */
+			voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_val_round);
+			if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
+				break;
+			}
+
+			/* voltage corner is below svsl1 */
+			if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS])
+				voltage_corner = mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS];
+
+			/* match vdd level */
+			for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+				if (voltage_corner == mmrm_sw_vdd_corner[i])
+					break;
+			}
+
+			/* this rate is reachable at level i and every level above it */
+			while (i < MMRM_VDD_LEVEL_MAX) {
+				tbl_entry->freq[i++] = clk_val_round;
+			}
+		}
+	}
+
+	/* print results */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		d_mpr_h("%s: csid(0x%x) corner(%s) clk_rate(%llu)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			cset->corner_tbl[i].name,
+			tbl_entry->freq[i]);
+	}
+
+	return rc;
+}
+
+/*
+ * Debug dump of a client's derived power/current tables across all
+ * vdd-level combinations.
+ */
+static void mmrm_sw_print_client_data(struct mmrm_sw_clk_mgr_info *sinfo,
+			struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
+{
+	struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct voltage_corner_set *cset = &cres->corner_set;
+	u32 i, j;
+
+	/* bug fix: dyn_pwr/leak_pwr/current_ma are u32 — print with %u,
+	 * not %zu (size_t), which is a format/argument mismatch.
+	 */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		d_mpr_p("%s: csid(0x%x) corner(%s) dyn_pwr(%u) leak_pwr(%u)\n",
+				__func__,
+				tbl_entry->clk_src_id,
+				cset->corner_tbl[i].name,
+				tbl_entry->dyn_pwr[i],
+				tbl_entry->leak_pwr[i]);
+
+		for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
+			d_mpr_p("%s: csid(0x%x) total_pwr(%u) cur_ma(%u)\n",
+				__func__,
+				tbl_entry->clk_src_id,
+				(tbl_entry->dyn_pwr[i] + tbl_entry->leak_pwr[i]),
+				tbl_entry->current_ma[i][j]);
+		}
+	}
+}
+
+/* Debug dump of a CRM client's vote table; no-op for non-CRM clients. */
+static void mmrm_sw_print_crm_table(struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
+{
+	int i;
+
+	if (!tbl_entry->is_crm_client)
+		return;
+
+	for (i = 0; i < tbl_entry->crm_client_tbl_size; i++)
+		d_mpr_h("%s: csid(0x%x) client tbl idx %d val %llu\n",
+			__func__, tbl_entry->clk_src_id,
+			i, tbl_entry->crm_client_tbl[i]);
+	d_mpr_h("%s: csid(0x%x) client tbl max rate (idx %d) : %llu\n",
+			__func__, tbl_entry->clk_src_id, tbl_entry->max_rate_idx,
+			tbl_entry->clk_rate);
+}
+
+/*
+ * Compute the maximum rate across all SW/HW CRM votes after applying
+ * new_clk_val at the slot implied by client_data, and report that slot's
+ * index via new_max_rate_idx. Does not modify the table itself.
+ */
+static u64 mmrm_sw_get_max_crm_rate(
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
+	struct mmrm_client_data *client_data, unsigned long new_clk_val,
+	int *new_max_rate_idx)
+{
+	/* bug fix: crm_max_rate was u32 but holds/compares u64 clk_rate and
+	 * crm_client_tbl[] values and is returned as u64 — truncation risk.
+	 */
+	u64 crm_max_rate;
+	u32 new_val_idx;
+
+	crm_max_rate = tbl_entry->clk_rate;
+	*new_max_rate_idx = tbl_entry->max_rate_idx;
+
+	/* SW client vote lives in the last slot; HW votes are grouped per
+	 * DRV instance, num_pwr_states slots each.
+	 */
+	new_val_idx = (client_data->drv_type == MMRM_CRM_SW_DRV) ?
+			(tbl_entry->crm_client_tbl_size - 1) : (tbl_entry->num_pwr_states *
+			client_data->crm_drv_idx + client_data->pwr_st);
+
+	if (new_clk_val > crm_max_rate) {
+		crm_max_rate = new_clk_val;
+		*new_max_rate_idx = new_val_idx;
+	} else {
+		/*
+		 * Get the new crm_max_rate from all SW/HW clients.
+		 * If the index with current max value is being updated with a lower value,
+		 * check if that index still has the max value or if another index has
+		 * the new max value.
+		 */
+		if (new_val_idx == tbl_entry->max_rate_idx) {
+			int i;
+
+			crm_max_rate = 0;
+			for (i = 0; i < tbl_entry->crm_client_tbl_size; i++) {
+				if (i == tbl_entry->max_rate_idx)
+					continue;
+
+				if (tbl_entry->crm_client_tbl[i] > crm_max_rate) {
+					crm_max_rate = tbl_entry->crm_client_tbl[i];
+					*new_max_rate_idx = i;
+				}
+			}
+
+			if (new_clk_val >= crm_max_rate) {
+				/* New value at old max index is still the maximum value */
+				crm_max_rate = new_clk_val;
+				*new_max_rate_idx = tbl_entry->max_rate_idx;
+			}
+		}
+	}
+
+	/* format fix: new_clk_val is unsigned long — use %lu */
+	d_mpr_h("%s: csid(0x%x) new clk rate(idx %d) = %lu, crm_max_rate(idx %d) = %llu\n",
+		__func__, tbl_entry->clk_src_id, new_val_idx, new_clk_val,
+		*new_max_rate_idx, crm_max_rate);
+
+	return crm_max_rate;
+}
+
+/*
+ * Derive per-level power and current tables for a client from its NOM
+ * (nominal) power numbers: scale dynamic power by frequency ratio and a
+ * per-corner scaling factor, scale leakage by its corner factor, then
+ * convert power to current for every rail-voltage combination.
+ * Values in the platform tables are Q16.16 fixed point.
+ * Returns -EINVAL if the clock src has no NOM table entry, else 0.
+ */
+static int mmrm_sw_update_curr(struct mmrm_sw_clk_mgr_info *sinfo,
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
+{
+	u32 i, j;
+	struct mmrm_driver_data *drv_data = (struct mmrm_driver_data *)sinfo->driver_data;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct voltage_corner_set *cset = &cres->corner_set;
+	u32 scaling_factor = 0, voltage_factor = 0;
+	fp_t nom_dyn_pwr, nom_leak_pwr, dyn_sc, leak_sc,
+		volt, dyn_pwr, leak_pwr, pwr_mw, nom_freq;
+	u32 c;
+	struct nom_clk_src_info *nom_tbl_entry = NULL;
+
+	/* locate this clock source's nominal power entry */
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		if (tbl_entry->clk_src_id == sinfo->clk_client_tbl[c].clk_src_id) {
+			nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
+			break;
+		}
+	}
+	if (nom_tbl_entry == NULL) {
+		d_mpr_h("%s: can't find 0x%x clock src ID\n",
+			__func__,
+			tbl_entry->clk_src_id);
+		return -EINVAL;
+	}
+
+	nom_dyn_pwr = FP(Q16_INT(nom_tbl_entry->nom_dyn_pwr),
+		Q16_FRAC(nom_tbl_entry->nom_dyn_pwr), 100);
+
+	nom_leak_pwr = FP(Q16_INT(nom_tbl_entry->nom_leak_pwr),
+		Q16_FRAC(nom_tbl_entry->nom_leak_pwr), 100);
+
+	nom_freq = tbl_entry->freq[MMRM_VDD_LEVEL_NOM];
+
+	/* update power & current entries for all levels */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		scaling_factor = cset->corner_tbl[i].scaling_factor_dyn;
+		dyn_sc = FP(
+			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);
+
+		scaling_factor = cset->corner_tbl[i].scaling_factor_leak;
+		leak_sc = FP(
+			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);
+
+		/* Frequency scaling */
+		pwr_mw = fp_mult(nom_dyn_pwr, tbl_entry->freq[i]);
+		pwr_mw = fp_div(pwr_mw, nom_freq);
+
+		/* Scaling factor */
+		dyn_pwr = fp_mult(pwr_mw, dyn_sc);
+		leak_pwr = fp_mult(nom_leak_pwr, leak_sc);
+
+		tbl_entry->dyn_pwr[i] = fp_round(dyn_pwr);
+		tbl_entry->leak_pwr[i] = fp_round(leak_pwr);
+
+		/* current at client level i for every rail voltage level j */
+		for (j = 0; j < MMRM_VDD_LEVEL_MAX; j++) {
+			voltage_factor = cset->corner_tbl[j].volt_factor;
+			volt = FP(Q16_INT(voltage_factor), Q16_FRAC(voltage_factor), 100);
+
+			tbl_entry->current_ma[i][j] = fp_round(fp_div((dyn_pwr+leak_pwr), volt));
+		}
+	}
+	mmrm_sw_print_client_data(sinfo, tbl_entry);
+	return 0;
+}
+
+/*
+ * Register a clock client with the SW clk manager.
+ * Looks up the pre-populated table entry matching (domain << 16 | id),
+ * allocates the client handle, copies the caller's descriptor, optionally
+ * sets up the CRM vote table, then derives the per-level freq and
+ * power/current tables. On any failure the entry is unwound to its
+ * unregistered state. Returns the client handle or NULL on error.
+ * Re-registration bumps ref_count when msm_mmrm_allow_multiple_register
+ * is set, otherwise fails.
+ */
+static struct mmrm_client *mmrm_sw_clk_client_register(
+	struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_clk_client_desc clk_desc,
+	enum mmrm_client_priority priority,
+	void *pvt_data,
+	notifier_callback_fn_t not_fn_cb)
+{
+	int rc = 0;
+	struct mmrm_client *clk_client = NULL;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+
+	u32 c = 0;
+	u32 clk_client_src_id = 0;
+
+	mutex_lock(&sw_clk_mgr->lock);
+
+	/* check if entry is free in table */
+	if (sinfo->tot_clk_clients == sinfo->enabled_clk_clients) {
+		d_mpr_e("%s: no free entry to register a clk client\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_nofree_entry;
+	}
+
+	/* look for entry that matches domain and id */
+	clk_client_src_id = (clk_desc.client_domain << 16 | clk_desc.client_id);
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		if (clk_client_src_id == sinfo->clk_client_tbl[c].clk_src_id)
+			break;
+	}
+
+	/* entry not found */
+	if (c == sinfo->tot_clk_clients) {
+		d_mpr_e("%s: unknown clk client 0x%x\n",
+			__func__, clk_client_src_id);
+		rc = -EINVAL;
+		goto err_nofree_entry;
+	}
+
+	tbl_entry = &sinfo->clk_client_tbl[c];
+
+	/* entry already registered */
+	if (tbl_entry->client) {
+		if (msm_mmrm_allow_multiple_register) {
+			tbl_entry->ref_count++;
+			d_mpr_h("%s: client csid(0x%x) already registered ref:%d\n",
+				__func__, tbl_entry->clk_src_id, tbl_entry->ref_count);
+			clk_client = tbl_entry->client;
+
+			mmrm_sw_print_client_data(sinfo, tbl_entry);
+
+			goto exit_found;
+		}
+
+		d_mpr_e("%s: client csid(0x%x) already registered\n",
+			__func__, tbl_entry->clk_src_id);
+		rc = -EINVAL;
+		goto err_already_registered;
+	}
+
+	/* populate the entry */
+	clk_client = kzalloc(sizeof(*clk_client), GFP_KERNEL);
+	if (!clk_client) {
+		d_mpr_e("%s: failed to allocate memory for clk_client\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_fail_alloc_clk_client;
+	}
+
+	/* client_uid doubles as the table index for later lookups */
+	clk_client->client_uid = c;
+	clk_client->client_type = MMRM_CLIENT_CLOCK;
+	tbl_entry->ref_count = 1;
+
+	/* copy the entries provided by client */
+	tbl_entry->client = clk_client;
+	strlcpy(tbl_entry->name, clk_desc.name, MMRM_CLK_CLIENT_NAME_SIZE);
+	tbl_entry->clk = clk_desc.clk;
+	tbl_entry->pri = priority;
+	tbl_entry->pvt_data = pvt_data;
+	tbl_entry->notifier_cb_fn = not_fn_cb;
+
+	/* reject CRM descriptors beyond the supported instance/state limits */
+	if (clk_desc.hw_drv_instances > MAX_HW_DRV_INSTANCES
+		|| clk_desc.num_pwr_states > MAX_POWER_STATES) {
+		d_mpr_e("%s: Invalid CRM data: HW DRV instances %d power states %d\n",
+			__func__, clk_desc.hw_drv_instances, clk_desc.num_pwr_states);
+		rc = -EINVAL;
+		goto err_invalid_crm_data;
+	}
+
+	/* CRM-managed client: one vote slot per HW instance/state + 1 SW slot */
+	if (clk_desc.hw_drv_instances > 0 && clk_desc.num_pwr_states > 0) {
+		d_mpr_h("%s: CRM-managed clock client: HW DRV instances %d, power states %d\n",
+			__func__, clk_desc.hw_drv_instances, clk_desc.num_pwr_states);
+		tbl_entry->crm_client_tbl_size = clk_desc.hw_drv_instances *
+			clk_desc.num_pwr_states + 1;
+		tbl_entry->crm_client_tbl = kcalloc(tbl_entry->crm_client_tbl_size,
+			sizeof(u64), GFP_KERNEL);
+		if (!tbl_entry->crm_client_tbl) {
+			d_mpr_e("%s: failed to allocate CRM client table\n", __func__);
+			rc = -ENOMEM;
+			goto err_fail_alloc_crm_tbl;
+		}
+		tbl_entry->is_crm_client = 1;
+		tbl_entry->max_rate_idx = 0;
+		tbl_entry->hw_drv_instances = clk_desc.hw_drv_instances;
+		tbl_entry->num_pwr_states = clk_desc.num_pwr_states;
+	}
+
+	/* print table entry */
+	d_mpr_h("%s: csid(0x%x) name(%s) pri(%d) pvt(%p) notifier(%p) hw_drv_instances(%d) num_pwr_states(%d)\n",
+		__func__,
+		tbl_entry->clk_src_id,
+		tbl_entry->name,
+		tbl_entry->pri,
+		tbl_entry->pvt_data,
+		tbl_entry->notifier_cb_fn,
+		tbl_entry->hw_drv_instances,
+		tbl_entry->num_pwr_states);
+
+	/* determine full range of clock freq */
+	rc = mmrm_sw_update_freq(sinfo, tbl_entry);
+	if (rc) {
+		d_mpr_e("%s: csid(0x%x) failed to update freq\n",
+			__func__, tbl_entry->clk_src_id);
+		goto err_fail_update_entry;
+	}
+
+	/* calculate current & scale power for other levels */
+	rc = mmrm_sw_update_curr(sinfo, tbl_entry);
+	if (rc) {
+		d_mpr_e("%s: csid(0x%x) failed to update current\n",
+			__func__, tbl_entry->clk_src_id);
+		goto err_fail_update_entry;
+	}
+
+exit_found:
+	mutex_unlock(&sw_clk_mgr->lock);
+	return clk_client;
+
+	/* error unwind: labels run top-down, undoing progressively less */
+err_fail_update_entry:
+	tbl_entry->is_crm_client = 0;
+	tbl_entry->max_rate_idx = 0;
+	tbl_entry->hw_drv_instances = 0;
+	tbl_entry->num_pwr_states = 0;
+	kfree(tbl_entry->crm_client_tbl);
+	tbl_entry->crm_client_tbl = NULL;
+err_fail_alloc_crm_tbl:
+	tbl_entry->crm_client_tbl_size = 0;
+err_invalid_crm_data:
+	kfree(clk_client);
+
+err_fail_alloc_clk_client:
+	tbl_entry->client = NULL;
+	tbl_entry->clk = NULL;
+	tbl_entry->pri = 0x0;
+	tbl_entry->pvt_data = NULL;
+	tbl_entry->notifier_cb_fn = NULL;
+err_nofree_entry:
+err_already_registered:
+	mutex_unlock(&sw_clk_mgr->lock);
+
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return NULL;
+}
+
+static int mmrm_sw_clk_client_deregister(struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_client *client)
+{
+	int rc =  0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+
+	/* validate the client ptr */
+	if (!client) {
+		d_mpr_e("%s: invalid client\n", __func__);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	if (client->client_uid >= sinfo->tot_clk_clients) {
+		d_mpr_e("%s: invalid client uid (%d)\n",
+			__func__, client->client_uid);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	mutex_lock(&sw_clk_mgr->lock);
+
+	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
+	if (tbl_entry->ref_count > 0) {
+		tbl_entry->ref_count--;
+	}
+
+	if (tbl_entry->ref_count == 0) {
+		kfree(tbl_entry->crm_client_tbl);
+		tbl_entry->crm_client_tbl = NULL;
+		kfree(tbl_entry->client);
+		tbl_entry->vdd_level = 0;
+		tbl_entry->clk_rate = 0;
+		tbl_entry->client = NULL;
+		tbl_entry->clk = NULL;
+		tbl_entry->pri = 0x0;
+		tbl_entry->pvt_data = NULL;
+		tbl_entry->notifier_cb_fn = NULL;
+		tbl_entry->is_crm_client = 0;
+		tbl_entry->max_rate_idx = 0;
+		tbl_entry->hw_drv_instances = 0;
+		tbl_entry->num_pwr_states = 0;
+		tbl_entry->crm_client_tbl_size = 0;
+	}
+
+	mutex_unlock(&sw_clk_mgr->lock);
+
+	return rc;
+
+err_invalid_client:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+static int mmrm_sw_get_req_level(
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
+	unsigned long clk_val, u32 *req_level)
+{
+	int rc = 0;
+	int voltage_corner;
+	unsigned long clk_round_val = 0;
+	u32 level;
+
+	/*
+	 * Clients may set rates that are higher than max supported rate for a clock.
+	 * Round the rate to get the max supported corner.
+	 */
+	clk_round_val = clk_round_rate(tbl_entry->clk, clk_val);
+
+	/* get voltage corner */
+	voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, clk_round_val);
+	if (voltage_corner < 0 || voltage_corner > mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_TURBO]) {
+		d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for rounded clk rate(%llu)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			voltage_corner,
+			clk_round_val);
+		rc = voltage_corner;
+		goto err_invalid_corner;
+	}
+
+	/* voltage corner is below low svs */
+	if (voltage_corner < mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_LOW_SVS]) {
+		d_mpr_h("%s: csid(0x%x): lower voltage corner(%d)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			voltage_corner);
+		*req_level = MMRM_VDD_LEVEL_LOW_SVS;
+		goto exit_no_err;
+	}
+
+	/* match vdd level */
+	for (level = 0; level < MMRM_VDD_LEVEL_MAX; level++) {
+		if (voltage_corner == mmrm_sw_vdd_corner[level])
+			break;
+	}
+
+	if (level == MMRM_VDD_LEVEL_MAX) {
+		d_mpr_e("%s: csid(0x%x): invalid voltage corner(%d) for rounded clk rate(%llu)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			voltage_corner,
+			clk_round_val);
+		rc = -EINVAL;
+		goto err_invalid_corner;
+	}
+
+	*req_level = level;
+	d_mpr_h("%s: req_level(%d)\n", __func__, level);
+
+exit_no_err:
+	return rc;
+
+err_invalid_corner:
+	return rc;
+}
+
+static int mmrm_sw_check_req_level(
+	struct mmrm_sw_clk_mgr_info *sinfo,
+	u32 clk_src_id, u32 req_level, u32 *adj_level)
+{
+	int rc = 0;
+	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
+	struct mmrm_sw_clk_client_tbl_entry *next_max_entry = NULL;
+	u32 c, level = req_level;
+
+	if (req_level >= MMRM_VDD_LEVEL_MAX) {
+		d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
+		rc = -EINVAL;
+		goto err_invalid_level;
+	}
+	d_mpr_h("%s: csid(0x%x) level(%d) peak_data->aggreg_level(%d)\n",
+		__func__, clk_src_id, level, peak_data->aggreg_level);
+
+	/* req_level is rejected when another client has a higher level */
+	if (req_level < peak_data->aggreg_level) {
+		for (c = 0; c < sinfo->tot_clk_clients; c++) {
+			tbl_entry = &sinfo->clk_client_tbl[c];
+			if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate ||
+				(tbl_entry->clk_src_id == clk_src_id)) {
+				continue;
+			}
+			if (tbl_entry->vdd_level == peak_data->aggreg_level) {
+				break;
+			}
+			if  ((tbl_entry->vdd_level < peak_data->aggreg_level)
+					&& (tbl_entry->vdd_level > req_level))
+				next_max_entry = tbl_entry;
+
+		}
+		/* reject req level */
+		if (c < sinfo->tot_clk_clients) {
+			level = peak_data->aggreg_level;
+		} else if (!IS_ERR_OR_NULL(next_max_entry)
+			&& next_max_entry->vdd_level > req_level) {
+			level = next_max_entry->vdd_level;
+		}
+	}
+
+	*adj_level = level;
+	d_mpr_h("%s: adj_level(%d)\n", __func__, level);
+	return rc;
+
+err_invalid_level:
+	return rc;
+}
+
+static int mmrm_sw_calculate_total_current(
+	struct mmrm_sw_clk_mgr_info *sinfo,
+	u32 req_level, u32 *total_cur, struct mmrm_sw_clk_client_tbl_entry *tbl_entry_new)
+{
+	int rc = 0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	u32 c, sum_cur = 0;
+
+	if (req_level >= MMRM_VDD_LEVEL_MAX) {
+		d_mpr_e("%s: invalid level %lu\n", __func__, req_level);
+		rc = -EINVAL;
+		goto err_invalid_level;
+	}
+
+	/* calculate sum of values (scaled by volt) */
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		tbl_entry = &sinfo->clk_client_tbl[c];
+		if (IS_ERR_OR_NULL(tbl_entry->clk) || !tbl_entry->clk_rate
+			|| (tbl_entry == tbl_entry_new)) {
+			continue;
+		}
+		sum_cur += (tbl_entry->current_ma[tbl_entry->vdd_level][req_level]
+			* tbl_entry->num_hw_blocks);
+	}
+
+	*total_cur = sum_cur;
+	d_mpr_h("%s: total_cur(%lu)\n", __func__, *total_cur);
+	return rc;
+
+err_invalid_level:
+	return rc;
+}
+
+static int mmrm_sw_throttle_low_priority_client(
+	struct mmrm_sw_clk_mgr_info *sinfo, int *delta_cur)
+{
+	int rc = 0, i;
+	u64 start_ts = 0, end_ts = 0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry_throttle_client;
+	struct mmrm_client_notifier_data notifier_data;
+	struct completion timeout;
+	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
+	struct mmrm_sw_throttled_clients_data *tc_data;
+
+	u32 now_cur_ma, min_cur_ma;
+	long clk_min_level = MMRM_VDD_LEVEL_LOW_SVS;
+
+	init_completion(&timeout);
+
+	for (i = 0; i < sinfo->throttle_clients_data_length ; i++) {
+		tbl_entry_throttle_client =
+			&sinfo->clk_client_tbl[sinfo->throttle_clients_info[i].tbl_entry_id];
+
+		if (IS_ERR_OR_NULL(tbl_entry_throttle_client))
+			continue;
+
+		now_cur_ma = tbl_entry_throttle_client->current_ma
+			[tbl_entry_throttle_client->vdd_level]
+			[peak_data->aggreg_level];
+		min_cur_ma = tbl_entry_throttle_client->current_ma[clk_min_level]
+			[peak_data->aggreg_level];
+
+		d_mpr_h("%s:csid(0x%x) name(%s)\n",
+			__func__, tbl_entry_throttle_client->clk_src_id,
+			tbl_entry_throttle_client->name);
+		d_mpr_h("%s:now_cur_ma(%llu) min_cur_ma(%llu) delta_cur(%d)\n",
+			__func__, now_cur_ma, min_cur_ma, *delta_cur);
+
+		if ((now_cur_ma <= min_cur_ma) || (now_cur_ma - min_cur_ma <= *delta_cur))
+			continue;
+
+		d_mpr_h("%s: Throttle client csid(0x%x) name(%s)\n",
+			__func__, tbl_entry_throttle_client->clk_src_id,
+			tbl_entry_throttle_client->name);
+		d_mpr_h("%s:now_cur_ma %llu-min_cur_ma %llu>delta_cur %d\n",
+			__func__, now_cur_ma, min_cur_ma, *delta_cur);
+
+		/* Setup notifier */
+		notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
+		notifier_data.cb_data.val_chng.old_val =
+			tbl_entry_throttle_client->freq[tbl_entry_throttle_client->vdd_level];
+		notifier_data.cb_data.val_chng.new_val =
+			tbl_entry_throttle_client->freq[clk_min_level];
+		notifier_data.pvt_data = tbl_entry_throttle_client->pvt_data;
+		start_ts = ktime_get_ns();
+
+		if (tbl_entry_throttle_client->notifier_cb_fn)
+			rc = tbl_entry_throttle_client->notifier_cb_fn(&notifier_data);
+
+		end_ts = ktime_get_ns();
+		d_mpr_h("%s: Client notifier cbk processing time %llu ns\n",
+			__func__, (end_ts - start_ts));
+
+		if (rc) {
+			d_mpr_e("%s: Client failed to send SUCCESS in callback(%d)\n",
+				__func__, tbl_entry_throttle_client->clk_src_id);
+			continue;
+		}
+
+		if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
+			d_mpr_e("%s:Client notifier cbk took %llu ns more than timeout %llu ns\n",
+				__func__, (end_ts - start_ts), NOTIFY_TIMEOUT);
+
+		if (tbl_entry_throttle_client->reserve == false) {
+			rc = clk_set_rate(tbl_entry_throttle_client->clk,
+						tbl_entry_throttle_client->freq[clk_min_level]);
+			if (rc) {
+				d_mpr_e("%s: Failed to throttle the clk csid(%d)\n",
+					__func__, tbl_entry_throttle_client->clk_src_id);
+				continue;
+			}
+		}
+
+		d_mpr_h("%s: %s throttled to %llu\n",
+			__func__, tbl_entry_throttle_client->name,
+			tbl_entry_throttle_client->freq[clk_min_level]);
+		*delta_cur -= now_cur_ma - min_cur_ma;
+
+		/* Store this client for bookkeeping */
+		tc_data = kzalloc(sizeof(*tc_data), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(tc_data)) {
+			d_mpr_e("%s: Failed to allocate memory\n", __func__);
+			return -ENOMEM;
+		}
+		tc_data->table_id = i;
+		tc_data->delta_cu_ma = now_cur_ma - min_cur_ma;
+		tc_data->prev_vdd_level = tbl_entry_throttle_client->vdd_level;
+		// Add throttled client to list to access it later
+		list_add_tail(&tc_data->list, &sinfo->throttled_clients);
+
+		/* Store the throttled clock rate of client */
+		tbl_entry_throttle_client->clk_rate =
+					tbl_entry_throttle_client->freq[clk_min_level];
+
+		/* Store the corner level of throttled client */
+		tbl_entry_throttle_client->vdd_level = clk_min_level;
+
+		/* Clearing the reserve flag */
+		tbl_entry_throttle_client->reserve = false;
+
+		break;
+	}
+
+	return rc;
+}
+
+static void mmrm_sw_dump_enabled_client_info(struct mmrm_sw_clk_mgr_info *sinfo)
+{
+	u32 c;
+	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
+
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		tbl_entry = &sinfo->clk_client_tbl[c];
+		if (tbl_entry->clk_rate) {
+			d_mpr_e("%s: csid(0x%x) clk_rate(%zu) vdd_level(%zu) cur_ma(%zu) num_hw_blocks(%zu)\n",
+				__func__,
+				tbl_entry->clk_src_id,
+				tbl_entry->clk_rate,
+				tbl_entry->vdd_level,
+				tbl_entry->current_ma[tbl_entry->vdd_level]
+					[peak_data->aggreg_level] * tbl_entry->num_hw_blocks,
+				tbl_entry->num_hw_blocks);
+		}
+	}
+	if (peak_data) {
+		d_mpr_e("%s: aggreg_val(%zu) aggreg_level(%zu)\n", __func__,
+			peak_data->aggreg_val, peak_data->aggreg_level);
+	}
+}
+
/*
 * Walk the throttled-clients bookkeeping list and, for every entry whose
 * saved current delta now fits under the peak threshold, notify the client
 * that it may return to its pre-throttle clock value, then drop the entry.
 * Note: only the notifier callback is invoked here; this function does not
 * itself restore the clock rate or the table entry's vdd_level.
 */
static int mmrm_reinstate_throttled_client(struct mmrm_sw_clk_mgr_info *sinfo)
{
	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
	struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
	struct mmrm_client_notifier_data notifier_data;
	struct mmrm_sw_clk_client_tbl_entry *re_entry_throttle_client;
	int rc =  0;
	u64 start_ts = 0, end_ts = 0;

	/* safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
		/* reinstate only if there is headroom for the saved delta */
		if (!IS_ERR_OR_NULL(iter) && peak_data->aggreg_val +
			iter->delta_cu_ma <= peak_data->threshold) {

			d_mpr_h("%s: table_id = %d\n", __func__, iter->table_id);

			re_entry_throttle_client =
				&sinfo->clk_client_tbl
				[sinfo->throttle_clients_info
				[iter->table_id].tbl_entry_id];
			if (!IS_ERR_OR_NULL(re_entry_throttle_client)) {
				d_mpr_h("%s:found throttled client name(%s) clsid (0x%x)\n",
					__func__, re_entry_throttle_client->name,
					re_entry_throttle_client->clk_src_id);
				/* old = throttled (LOW_SVS) freq, new = pre-throttle freq */
				notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE;
				notifier_data.cb_data.val_chng.old_val =
					re_entry_throttle_client->freq[MMRM_VDD_LEVEL_LOW_SVS];

				notifier_data.cb_data.val_chng.new_val =
					re_entry_throttle_client->freq[iter->prev_vdd_level];

				notifier_data.pvt_data = re_entry_throttle_client->pvt_data;
				start_ts = ktime_get_ns();

				if (re_entry_throttle_client->notifier_cb_fn) {
					rc = re_entry_throttle_client->notifier_cb_fn
								(&notifier_data);
					end_ts = ktime_get_ns();
					d_mpr_h("%s: Client notifier cbk processing time(%llu)ns\n",
						__func__, end_ts - start_ts);

					if (rc) {
						d_mpr_e("%s: Client notifier callback failed(%d)\n",
							__func__,
							re_entry_throttle_client->clk_src_id);
					}
					if ((end_ts - start_ts) > NOTIFY_TIMEOUT)
						d_mpr_e("%s: Client notifier took %llu ns\n",
							__func__, (end_ts - start_ts));
				}
				/* bookkeeping entry consumed — remove and free it */
				list_del(&iter->list);
				kfree(iter);
			}
		}
	}
	/* NOTE(review): callback failures are logged but not propagated */
	return 0;
}
+
/*
 * Core admission check: verify that applying (req_level, clk_val) for
 * tbl_entry keeps the aggregated current below the configured threshold,
 * throttling low-priority clients if a high-priority request would
 * otherwise overshoot. On success, peak_data->aggreg_val/aggreg_level are
 * updated and previously throttled clients are reconsidered.
 * Caller must hold the sw clk mgr lock.
 */
static int mmrm_sw_check_peak_current(struct mmrm_sw_clk_mgr_info *sinfo,
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
	u32 req_level, u32 clk_val, u32 num_hw_blocks)
{
	int rc = 0;
	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
	u32 adj_level = req_level;
	u32 peak_cur = peak_data->aggreg_val;
	u32 old_cur = 0, new_cur = 0;

	int delta_cur = 0;

	/* check the req level and adjust according to tbl entries */
	rc = mmrm_sw_check_req_level(sinfo, tbl_entry->clk_src_id, req_level, &adj_level);
	if (rc) {
		goto err_invalid_level;
	}

	/* calculate new cur val as per adj_val */
	if (clk_val)
		new_cur = tbl_entry->current_ma[req_level][adj_level] * num_hw_blocks;


	/* calculate old cur */
	if (tbl_entry->clk_rate) {
		//old_cur = tbl_entry->current_ma[tbl_entry->vdd_level][adj_level];
		/* NOTE(review): old_cur is indexed by the current aggregate
		 * level while new_cur uses adj_level — confirm this asymmetry
		 * is intended (see commented-out line above).
		 */
		old_cur = tbl_entry->current_ma[tbl_entry->vdd_level]
			[peak_data->aggreg_level] * tbl_entry->num_hw_blocks;
	}

	/* 1. adj_level increase: recalculated peak_cur other clients + new_cur
	 * 2. adj_level decrease: recalculated peak_cur other clients + new_cur
	 * 3. clk_val increase: aggreg_val + (new_cur - old_cur)
	 * 4. clk_val decrease: aggreg_val + (new_cur - old_cur)
	 * 5. clk_val 0: aggreg_val - old_cur
	 */

	/* recalculate aggregated current with adj level */
	if (adj_level != peak_data->aggreg_level) {
		rc = mmrm_sw_calculate_total_current(sinfo, adj_level, &peak_cur, tbl_entry);
		if (rc) {
			goto err_invalid_level;
		}
		peak_cur += new_cur;
	} else {
		/* same level: apply only this client's signed delta */
		delta_cur = (signed int)new_cur - old_cur;
	}

	d_mpr_h("%s: csid (0x%x) peak_cur(%zu) new_cur(%zu) old_cur(%zu) delta_cur(%d)\n",
		__func__, tbl_entry->clk_src_id, peak_cur, new_cur, old_cur, delta_cur);

	/* negative value, update peak data */
	if ((signed)peak_cur + delta_cur <= 0) {
		peak_data->aggreg_val = 0;
		peak_data->aggreg_level = adj_level;
		goto exit_no_err;
	}

	/* peak overshoot, do not update peak data */
	if ((signed)peak_cur + delta_cur >= peak_data->threshold) {
		/* Find low prority client and throttle it*/

		/* only high-priority clients may trigger throttling, and only
		 * when the debugfs feature flag is enabled
		 */
		if ((tbl_entry->pri == MMRM_CLIENT_PRIOR_HIGH)
			&& (msm_mmrm_enable_throttle_feature > 0)) {
			rc = mmrm_sw_throttle_low_priority_client(sinfo, &delta_cur);
			if (rc != 0) {
				d_mpr_e("%s: Failed to throttle the low priority client\n",
						__func__);
				mmrm_sw_dump_enabled_client_info(sinfo);
				goto err_peak_overshoot;
			}
		} else {
			d_mpr_e("%s: Client csid(0x%x) name(%s) can't request throtlling\n",
				__func__, tbl_entry->clk_src_id, tbl_entry->name);
			mmrm_sw_dump_enabled_client_info(sinfo);
			rc = -EINVAL;
			goto err_peak_overshoot;
		}
	}

	/* update peak data */
	peak_data->aggreg_val = peak_cur + delta_cur;
	peak_data->aggreg_level = adj_level;
	/* with new headroom, see if throttled clients can be reinstated */
	mmrm_reinstate_throttled_client(sinfo);

exit_no_err:
	d_mpr_h("%s: aggreg_val(%lu) aggreg_level(%lu)\n",
		__func__,
		peak_data->aggreg_val,
		peak_data->aggreg_level);
	return rc;

err_invalid_level:
err_peak_overshoot:
	return rc;
}
+
/*
 * Set a client's clock rate after admission control.
 * Flow: validate inputs -> (CRM clients) compute the max rate across power
 * states -> skip duplicate non-CRM requests -> map rate to a vdd level ->
 * check peak current under the mgr lock -> commit table state -> program
 * the clock (unless reserve-only), using the CRM HW-DRV path for CRM
 * clients with a HW drv type.
 * Returns 0 on success, negative errno on validation/overshoot/clk failure.
 */
static int mmrm_sw_clk_client_setval(struct mmrm_clk_mgr *sw_clk_mgr,
	struct mmrm_client *client,
	struct mmrm_client_data *client_data,
	unsigned long clk_val)
{
	int rc = 0;
	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
	bool req_reserve;
	u32 req_level;
	unsigned long crm_max_rate = 0;
	int max_rate_idx = 0;

	/* validate input params */
	if (!client) {
		d_mpr_e("%s: invalid client\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client;
	}

	if (client->client_uid >= sinfo->tot_clk_clients) {
		d_mpr_e("%s: invalid client uid (%d)\n",
			__func__, client->client_uid);
		rc = -EINVAL;
		goto err_invalid_client;
	}

	if (!client_data) {
		d_mpr_e("%s: invalid client data\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client_data;
	}

	/* get table entry */
	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
	if (IS_ERR_OR_NULL(tbl_entry->clk)) {
		d_mpr_e("%s: clk src not registered\n", __func__);
		rc = -EINVAL;
		goto err_invalid_client;
	}

	d_mpr_h("%s: csid(0x%x) clk rate %llu\n",
		__func__, tbl_entry->clk_src_id, clk_val);

	/*
	 * CRM clients: validate the DRV instance / power-state indices, then
	 * admission-control against the max rate over all their settings.
	 */
	if (tbl_entry->is_crm_client) {
		if (client_data->crm_drv_idx >= tbl_entry->hw_drv_instances ||
			client_data->pwr_st >= tbl_entry->num_pwr_states) {
			d_mpr_e("%s: invalid CRM data\n", __func__);
			rc = -EINVAL;
			goto err_invalid_client_data;
		}

		crm_max_rate = mmrm_sw_get_max_crm_rate(tbl_entry, client_data,
						clk_val, &max_rate_idx);
	}

	/*
	 * Check if the requested clk rate is the same as the current clk rate.
	 * When clk rates are the same, compare this with the current state.
	 * Skip when duplicate calculations will be made.
	 * CRM Clients: Always set the rate
	 * --- current ---- requested --- action ---
	 * a.  reserve  &&  req_reserve:  skip
	 * b. !reserve  && !req_reserve:  skip
	 * c. !reserve  &&  req_reserve:  skip
	 * d.  reserve  && !req_reserve:  set clk rate
	 */
	req_reserve = client_data->flags & MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY;
	if (tbl_entry->clk_rate == clk_val &&
		tbl_entry->num_hw_blocks == client_data->num_hw_blocks &&
		tbl_entry->is_crm_client == false) {

		d_mpr_h("%s: csid(0x%x) same as previous clk rate %llu\n",
			__func__, tbl_entry->clk_src_id, clk_val);

		/* a & b */
		if (tbl_entry->reserve == req_reserve)
			goto exit_no_err;

		/* c & d */
		mutex_lock(&sw_clk_mgr->lock);
		tbl_entry->reserve = req_reserve;
		mutex_unlock(&sw_clk_mgr->lock);

		/* skip or set clk rate */
		if (req_reserve)
			goto exit_no_err;
		else
			goto set_clk_rate;
	}

	/* get corresponding level */
	if (clk_val) {
		/* CRM clients are leveled by their max rate, not this request */
		if (!tbl_entry->is_crm_client)
			rc = mmrm_sw_get_req_level(tbl_entry, clk_val, &req_level);
		else
			rc = mmrm_sw_get_req_level(tbl_entry, crm_max_rate, &req_level);
		if (rc || req_level >= MMRM_VDD_LEVEL_MAX) {
			d_mpr_e("%s: csid(0x%x) unable to get level for clk rate %llu crm_max_rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val, crm_max_rate);
			rc = -EINVAL;
			goto err_invalid_clk_val;
		}
		if (!((client_data->num_hw_blocks >= 1) &&
			   (client_data->num_hw_blocks <= tbl_entry->max_num_hw_blocks))) {
			d_mpr_e("%s: csid(0x%x) num_hw_block:%d\n",
				__func__, tbl_entry->clk_src_id, client_data->num_hw_blocks);
			rc = -EINVAL;
			goto err_invalid_client_data;
		}
	} else {
		/* rate 0 = release: level drops to the lowest index */
		req_level = 0;
	}

	mutex_lock(&sw_clk_mgr->lock);

	/* check and update for peak current */
	if (!tbl_entry->is_crm_client) {
		rc = mmrm_sw_check_peak_current(sinfo, tbl_entry,
			req_level, clk_val, client_data->num_hw_blocks);
	} else {
		rc = mmrm_sw_check_peak_current(sinfo, tbl_entry,
			req_level, crm_max_rate, client_data->num_hw_blocks);
	}

	if (rc) {
		d_mpr_e("%s: csid (0x%x) peak overshoot peak_cur(%lu)\n",
			__func__, tbl_entry->clk_src_id,
			sinfo->peak_cur_data.aggreg_val);
		mutex_unlock(&sw_clk_mgr->lock);
		goto err_peak_overshoot;
	}

	/* update table entry */
	if (!tbl_entry->is_crm_client) {
		tbl_entry->clk_rate = clk_val;
	} else {
		/* record this setting in the CRM per-state table; SW DRV uses
		 * the dedicated last slot
		 */
		tbl_entry->max_rate_idx = max_rate_idx;
		tbl_entry->clk_rate = crm_max_rate;
		if (client_data->drv_type == MMRM_CRM_SW_DRV)
			tbl_entry->crm_client_tbl[tbl_entry->crm_client_tbl_size - 1] = clk_val;
		else
			tbl_entry->crm_client_tbl[tbl_entry->num_pwr_states *
				client_data->crm_drv_idx +
				client_data->pwr_st] = clk_val;

		mmrm_sw_print_crm_table(tbl_entry);
	}
	tbl_entry->vdd_level = req_level;
	tbl_entry->reserve = req_reserve;
	tbl_entry->num_hw_blocks = client_data->num_hw_blocks;

	mutex_unlock(&sw_clk_mgr->lock);

	/* check reserve only flag (skip set clock rate) */
	if (req_reserve && !tbl_entry->is_crm_client) {
		d_mpr_h("%s: csid(0x%x) skip setting clk rate\n",
		__func__, tbl_entry->clk_src_id);
		rc = 0;
		goto exit_no_err;
	}

set_clk_rate:
	if (!tbl_entry->is_crm_client || client_data->drv_type == MMRM_CRM_SW_DRV) {
		d_mpr_h("%s: csid(0x%x) setting clk rate %llu\n",
			__func__, tbl_entry->clk_src_id, clk_val);

		rc = clk_set_rate(tbl_entry->clk, clk_val);
		if (rc) {
			d_mpr_e("%s: csid(0x%x) failed to set clk rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val);
			rc = -EINVAL;
			/* TBD: incase of failure clk_rate is invalid */
			goto err_clk_set_fail;
		}
	} else {
		d_mpr_h(
		"%s: csid(0x%x) setting clk rate %llu drv_type %u, crm_drv_idx %u, pwr_st %u\n",
			__func__, tbl_entry->clk_src_id, clk_val,
			CRM_HW_DRV, client_data->crm_drv_idx,
			client_data->pwr_st);

		/* HW-DRV CRM clients program the rate through the CRM API */
		rc = qcom_clk_crm_set_rate(tbl_entry->clk, CRM_HW_DRV,
				client_data->crm_drv_idx,
				client_data->pwr_st, clk_val);
		if (rc) {
			d_mpr_e("%s: csid(0x%x) failed to set clk rate %llu\n",
				__func__, tbl_entry->clk_src_id, clk_val);
			rc = -EINVAL;
			/* TBD: incase of failure clk_rate is invalid */
			goto err_clk_set_fail;
		}
	}

exit_no_err:
	d_mpr_h("%s: clk rate %lu set successfully for %s\n",
			__func__, clk_val, tbl_entry->name);
	return rc;

err_invalid_client:
err_invalid_client_data:
err_invalid_clk_val:
err_peak_overshoot:
err_clk_set_fail:
	d_mpr_e("%s: error = %d\n", __func__, rc);
	return rc;
}
+
+static int mmrm_sw_clk_client_setval_inrange(struct mmrm_clk_mgr *sw_clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data,
+		struct mmrm_client_res_value *val)
+{
+	/* TBD: add support for set val in range */
+	return mmrm_sw_clk_client_setval(sw_clk_mgr, client, client_data,
+		val->cur);
+}
+
+static int mmrm_sw_clk_client_getval(struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+
+	/* validate input params */
+	if (!client) {
+		d_mpr_e("%s: invalid client\n", __func__);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	if (client->client_uid >= sinfo->tot_clk_clients) {
+		d_mpr_e("%s: invalid client uid (%d)\n",
+			__func__, client->client_uid);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
+	if (!tbl_entry->clk) {
+		d_mpr_e("%s: clk src not registered\n", __func__);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	/* return previously configured value */
+	/* TBD: Identify the min & max values */
+	val->min = tbl_entry->clk_rate;
+	val->cur = tbl_entry->clk_rate;
+	val->max = tbl_entry->clk_rate;
+
+	return rc;
+
+err_invalid_client:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+static int mmrm_sw_clk_print_enabled_client_info(struct mmrm_clk_mgr *sw_clk_mgr,
+	char *buf,
+	int sz)
+{
+	u32 c, len;
+	u32 left_spaces = (u32)sz;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry = NULL;
+
+	len = scnprintf(buf, left_spaces, "  csid    clk_rate     vdd_level   cur_ma   num_hw_blocks\n");
+	left_spaces -= len;
+	buf += len;
+
+	if (sinfo != NULL && peak_data != NULL) {
+		for (c = 0; (c < sinfo->tot_clk_clients) && (left_spaces > 1); c++) {
+			tbl_entry = &sinfo->clk_client_tbl[c];
+			if ((tbl_entry != NULL) && (tbl_entry->clk_rate)) {
+				len = scnprintf(buf, left_spaces, "0x%x    %zu   %zu   %zu   %zu\n",
+					tbl_entry->clk_src_id,
+					tbl_entry->clk_rate,
+					tbl_entry->vdd_level,
+					tbl_entry->current_ma[tbl_entry->vdd_level]
+						[peak_data->aggreg_level] * tbl_entry->num_hw_blocks,
+					tbl_entry->num_hw_blocks);
+				left_spaces -= len;
+				buf += len;
+			}
+		}
+		if (left_spaces > 1) {
+			len = scnprintf(buf, left_spaces, "aggreg_val(%zu) aggreg_level(%zu)\n",
+				peak_data->aggreg_val, peak_data->aggreg_level);
+			left_spaces -= len;
+		}
+	}
+	return (sz - left_spaces);
+}
+
/* SW-scheme client ops table plugged into the generic clk manager */
static struct mmrm_clk_mgr_client_ops clk_client_swops = {
	.clk_client_reg = mmrm_sw_clk_client_register,
	.clk_client_dereg = mmrm_sw_clk_client_deregister,
	.clk_client_setval = mmrm_sw_clk_client_setval,
	.clk_client_setval_inrange = mmrm_sw_clk_client_setval_inrange,
	.clk_client_getval = mmrm_sw_clk_client_getval,
	.clk_print_enabled_client_info = mmrm_sw_clk_print_enabled_client_info,
};
+
+static int mmrm_sw_prepare_table(struct mmrm_clk_platform_resources *cres,
+	struct mmrm_sw_clk_mgr_info *sinfo)
+{
+	int rc = 0;
+	u32 c;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct nom_clk_src_info *nom_tbl_entry;
+
+	/* read all resource entries */
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		tbl_entry = &sinfo->clk_client_tbl[c];
+		nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
+
+		tbl_entry->clk_src_id = (nom_tbl_entry->domain << 16 |
+			nom_tbl_entry->clk_src_id);
+		tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM] =
+			nom_tbl_entry->nom_dyn_pwr;
+		tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM] =
+			nom_tbl_entry->nom_leak_pwr;
+		tbl_entry->max_num_hw_blocks = nom_tbl_entry->num_hw_block;
+
+		d_mpr_h("%s: updating csid(0x%x) dyn_pwr(%d) leak_pwr(%d) num(%d)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM],
+			tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM],
+			tbl_entry->num_hw_blocks);
+	}
+
+	return rc;
+}
+
+int mmrm_init_sw_clk_mgr(void *driver_data)
+{
+	int rc = 0, i, j;
+	struct mmrm_driver_data *drv_data =
+		(struct mmrm_driver_data *)driver_data;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct mmrm_sw_clk_mgr_info *sinfo = NULL;
+	struct mmrm_clk_mgr *sw_clk_mgr = NULL;
+	u32 tbl_size = 0;
+
+	/* mmrm_sw_clk_mgr */
+	sw_clk_mgr = kzalloc(sizeof(*sw_clk_mgr), GFP_KERNEL);
+	if (!sw_clk_mgr) {
+		d_mpr_e("%s: failed to allocate memory for sw_clk_mgr\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_fail_sw_clk_mgr;
+	}
+
+	/* initialize the tables */
+	tbl_size = sizeof(struct mmrm_sw_clk_client_tbl_entry) *
+		cres->nom_clk_set.count;
+
+	sinfo = &(sw_clk_mgr->data.sw_info);
+	sinfo->driver_data = drv_data;
+	sinfo->clk_client_tbl = kzalloc(tbl_size, GFP_KERNEL);
+	if (!sinfo->clk_client_tbl) {
+		d_mpr_e(
+			"%s: failed to allocate memory for clk_client_tbl (%d)\n",
+			__func__, cres->nom_clk_set.count);
+		rc = -ENOMEM;
+		goto err_fail_clk_tbl;
+	}
+	sinfo->tot_clk_clients = cres->nom_clk_set.count;
+	sinfo->enabled_clk_clients = 0;
+	INIT_LIST_HEAD(&sinfo->throttled_clients);
+
+	/* prepare table entries */
+	rc = mmrm_sw_prepare_table(cres, sinfo);
+	if (rc) {
+		d_mpr_e("%s: failed to prepare clk table\n", __func__);
+		rc = -ENOMEM;
+		goto err_fail_prep_tbl;
+	}
+
+	/* update the peak current threshold */
+	sinfo->peak_cur_data.threshold = cres->peak_threshold;
+	sinfo->peak_cur_data.aggreg_val = 0;
+	sinfo->peak_cur_data.aggreg_level = 0;
+	sinfo->throttle_clients_data_length = cres->throttle_clients_data_length;
+	for (i = 0; i < sinfo->throttle_clients_data_length; i++) {
+		for (j = 0; j < sinfo->tot_clk_clients; j++) {
+			if (sinfo->clk_client_tbl[j].clk_src_id
+					== cres->clsid_threshold_clients[i]) {
+				sinfo->throttle_clients_info[i].csid_throttle_client
+						= cres->clsid_threshold_clients[i];
+				sinfo->throttle_clients_info[i].tbl_entry_id = j;
+				break;
+			}
+		}
+	}
+
+	/* initialize mutex for sw clk mgr */
+	mutex_init(&sw_clk_mgr->lock);
+	sw_clk_mgr->scheme = drv_data->clk_res.scheme;
+
+	/* clk client operations */
+	sw_clk_mgr->clk_client_ops = &clk_client_swops;
+	drv_data->clk_mgr = sw_clk_mgr;
+
+	return rc;
+
+err_fail_prep_tbl:
+	kfree(sinfo->clk_client_tbl);
+err_fail_clk_tbl:
+	kfree(sw_clk_mgr);
+	drv_data->clk_mgr = NULL;
+err_fail_sw_clk_mgr:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr)
+{
+	int rc = 0;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+	struct mmrm_sw_throttled_clients_data *iter, *safe_iter = NULL;
+
+	list_for_each_entry_safe(iter, safe_iter, &sinfo->throttled_clients, list) {
+		list_del(&iter->list);
+		kfree(iter);
+	}
+
+	if (!sw_clk_mgr) {
+		d_mpr_e("%s: sw_clk_mgr null\n", __func__);
+		return -EINVAL;
+	}
+
+	kfree(sw_clk_mgr->data.sw_info.clk_client_tbl);
+	mutex_destroy(&sw_clk_mgr->lock);
+	kfree(sw_clk_mgr);
+
+	return rc;
+}

+ 77 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_debug.c

@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+
+#include "mmrm_debug.h"
+
/* Default log mask: errors + warnings, routed through printk */
int msm_mmrm_debug = MMRM_ERR | MMRM_WARN | MMRM_PRINTK;
/* Non-zero: allow throttling of low-priority clients on peak overshoot */
u8 msm_mmrm_enable_throttle_feature = 1;
/* presumably permits re-registering an already-registered client — TODO confirm */
u8 msm_mmrm_allow_multiple_register = 0;



/* scratch buffer size for the debugfs "info" read handler */
#define MAX_DBG_BUF_SIZE 4096
+
+static ssize_t msm_mmrm_debugfs_info_read(
+	struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	char *dbuf, *cur, *end;
+	ssize_t len = 0;
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf) {
+		d_mpr_e("%s: Allocation failed!\n", __func__);
+		return -ENOMEM;
+	}
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+
+	// TODO: Fill cur here
+
+	len = simple_read_from_buffer(buf, count, ppos, dbuf, cur - dbuf);
+	kfree(dbuf);
+	return len;
+}
+
/* read-only fops for the "info" node; simple_open just forwards i_private */
const struct file_operations msm_mmrm_debugfs_info_fops = {
	.open = simple_open,
	.read = msm_mmrm_debugfs_info_read,
};
+
+struct dentry *msm_mmrm_debugfs_init(void)
+{
+	struct dentry *dir;
+	int file_val;
+
+	/* create a directory in debugfs root (/sys/kernel/debug) */
+	dir = debugfs_create_dir("msm_mmrm", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		d_mpr_e("%s: Call to debugfs_create_dir(%s) failed!\n", __func__, "mmrm");
+		goto failed_create_dir;
+	}
+
+	/* basic info */
+	if (!debugfs_create_file("info", 0444, dir, &file_val, &msm_mmrm_debugfs_info_fops)) {
+		d_mpr_e("%s: Call to debugfs_create_file(%s) failed!\n", __func__, "info");
+		goto failed_create_dir;
+	}
+
+	/* add other params here */
+	debugfs_create_u32("debug_level", 0644, dir, &msm_mmrm_debug);
+	debugfs_create_u8("allow_multiple_register", 0644, dir, &msm_mmrm_allow_multiple_register);
+	debugfs_create_u8("enable_throttle_feature", 0644, dir, &msm_mmrm_enable_throttle_feature);
+
+	return dir;
+
+failed_create_dir:
+	d_mpr_e("%s: error\n", __func__);
+	return NULL;
+}
+
/* Remove the msm_mmrm debugfs directory and everything beneath it */
void msm_mmrm_debugfs_deinit(struct dentry *dir)
{
	debugfs_remove_recursive(dir);
}

+ 72 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_debug.h

@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MMRM_DEBUG__
+#define __MMRM_DEBUG__
+
+#include <linux/debugfs.h>
+#include <linux/printk.h>
+
+#ifndef MMRM_DBG_LABEL
+#define MMRM_DBG_LABEL "msm_mmrm"
+#endif
+
+#define MMRM_DBG_TAG MMRM_DBG_LABEL ": %4s: "
+
+/* To enable messages OR these values and
+ * echo the result to debugfs file.
+ */
+enum mmrm_msg_prio {
+	MMRM_ERR = 0x000001,
+	MMRM_HIGH = 0x000002,
+	MMRM_LOW = 0x000004,
+	MMRM_WARN = 0x000008,
+	MMRM_POWER = 0x000010,
+	MMRM_PRINTK = 0x010000,
+	MMRM_FTRACE = 0x020000,
+};
+
+extern int msm_mmrm_debug;
+extern u8 msm_mmrm_allow_multiple_register;
+
+#define dprintk(__level, __fmt, ...) \
+	do { \
+		if (msm_mmrm_debug & __level) { \
+			if (msm_mmrm_debug & MMRM_PRINTK) { \
+				pr_info(MMRM_DBG_TAG __fmt, \
+					get_debug_level_str(__level), \
+					##__VA_ARGS__); \
+			} \
+		} \
+	} while (0)
+
+#define d_mpr_e(__fmt, ...) dprintk(MMRM_ERR, __fmt, ##__VA_ARGS__)
+#define d_mpr_h(__fmt, ...) dprintk(MMRM_HIGH, __fmt, ##__VA_ARGS__)
+#define d_mpr_l(__fmt, ...) dprintk(MMRM_LOW, __fmt, ##__VA_ARGS__)
+#define d_mpr_w(__fmt, ...) dprintk(MMRM_WARN, __fmt, ##__VA_ARGS__)
+#define d_mpr_p(__fmt, ...) dprintk(MMRM_POWER, __fmt, ##__VA_ARGS__)
+
+/* Map a single mmrm_msg_prio bit to the short tag printed in log lines. */
+static inline char *get_debug_level_str(int level)
+{
+	char *tag = "????";
+
+	if (level == MMRM_ERR)
+		tag = "err ";
+	else if (level == MMRM_HIGH)
+		tag = "high";
+	else if (level == MMRM_LOW)
+		tag = "low ";
+	else if (level == MMRM_WARN)
+		tag = "warn";
+	else if (level == MMRM_POWER)
+		tag = "power";
+
+	return tag;
+}
+
+struct dentry *msm_mmrm_debugfs_init(void);
+void msm_mmrm_debugfs_deinit(struct dentry *dir);
+
+#endif

+ 68 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_fixedpoint.h

@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifdef _FIXP_ARITH_H
+#error "This implementation is meant to override fixp-arith.h, don't use both"
+#endif
+
+#ifndef _MMRM_FIXEDPOINT_H_
+#define _MMRM_FIXEDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+/*
+ * Normally would typedef'ed, but checkpatch doesn't like typedef.
+ * Also should be normally typedef'ed to intmax_t but that doesn't seem to be
+ * available in the kernel
+ */
+#define fp_t size_t
+
+/* (Arbitrarily) make the first 25% of the bits to be the fractional bits */
+#define FP_FRACTIONAL_BITS ((sizeof(fp_t) * 8) / 4)
+
+#define FP(__i, __f_n, __f_d) \
+	((((fp_t)(__i)) << FP_FRACTIONAL_BITS) + \
+	(((__f_n) << FP_FRACTIONAL_BITS) / (__f_d)))
+
+#define FP_INT(__i) FP(__i, 0, 1)
+#define FP_ONE FP_INT(1)
+#define FP_ZERO FP_INT(0)
+
+/* Mask covering all fractional bits (i.e. the maximum fractional value). */
+static inline size_t fp_frac_base(void)
+{
+	return GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+/* Extract the fractional part of a fixed-point value. */
+static inline size_t fp_frac(fp_t a)
+{
+	return a & GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+/* Extract the integer part of a fixed-point value (truncates). */
+static inline size_t fp_int(fp_t a)
+{
+	return a >> FP_FRACTIONAL_BITS;
+}
+
+/* Round a fixed-point value to the nearest integer (halves round up). */
+static inline size_t fp_round(fp_t a)
+{
+	/* is the fractional part >= frac_max / 2? */
+	bool round_up = fp_frac(a) >= fp_frac_base() / 2;
+
+	return fp_int(a) + round_up;
+}
+
+/* Fixed-point multiply.
+ * NOTE(review): the intermediate product a * b can overflow fp_t for
+ * large operands — confirm callers keep values in range.
+ */
+static inline fp_t fp_mult(fp_t a, fp_t b)
+{
+	return (a * b) >> FP_FRACTIONAL_BITS;
+}
+
+
+/* Fixed-point divide.
+ * NOTE(review): no guard against b == 0, and a << FP_FRACTIONAL_BITS can
+ * overflow for large a — confirm caller contracts.
+ */
+static inline fp_t fp_div(fp_t a, fp_t b)
+{
+	return (a << FP_FRACTIONAL_BITS) / b;
+}
+
+#endif  /* _MMRM_FIXEDPOINT_H_ */

+ 162 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_internal.c

@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/of_platform.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+
+/* common config table: currently only selects the clk mgr scheme (SW) */
+static struct mmrm_common_data common_pt_data[] = {
+	{
+		.key = "qcom,mmrm_clk_mgr_scheme",
+		.value = CLK_MGR_SCHEME_SW,
+	},
+};
+
+/*throttle client list is as per fdd & resource availability*/
+
+/* (domain, clock client id) pairs eligible for throttling on the targets
+ * bound to commom_pt_platform_data_pineapple below */
+static struct mmrm_throttle_clients_data common_pt_throttle_clients_data_pineapple[] = {
+	{
+		.domain = MMRM_CLIENT_DOMAIN_DISPLAY,
+		.id = 0x3e,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_VIDEO,
+		.id = 0x03,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CAMERA,
+		.id = 0x58,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CVP,
+		.id = 0x0a,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CAMERA,
+		.id = 0x02,
+	},
+};
+
+/* (domain, clock client id) pairs eligible for throttling on cliffs */
+static struct mmrm_throttle_clients_data common_pt_throttle_clients_data_cliffs[] = {
+	{
+		.domain = MMRM_CLIENT_DOMAIN_DISPLAY,
+		.id = 0x3e,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_VIDEO,
+		.id = 0x03,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CAMERA,
+		.id = 0x62,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CVP,
+		.id = 0x0a,
+	},
+	{
+		.domain = MMRM_CLIENT_DOMAIN_CAMERA,
+		.id = 0x17,
+	},
+};
+
+static struct mmrm_platform_data commom_pt_platform_data_pineapple = {
+	.common_data = common_pt_data,
+	.common_data_length = ARRAY_SIZE(common_pt_data),
+	.throttle_clk_clients_data = common_pt_throttle_clients_data_pineapple,
+	.throttle_clk_clients_data_length = ARRAY_SIZE(common_pt_throttle_clients_data_pineapple),
+};
+
+static struct mmrm_platform_data commom_pt_platform_data_cliffs = {
+	.common_data = common_pt_data,
+	.common_data_length = ARRAY_SIZE(common_pt_data),
+	.throttle_clk_clients_data = common_pt_throttle_clients_data_cliffs,
+	.throttle_clk_clients_data_length = ARRAY_SIZE(common_pt_throttle_clients_data_cliffs),
+};
+
+/* DT compatible -> platform data binding used by mmrm_get_platform_data() */
+static const struct of_device_id mmrm_dt_match[] = {
+	{
+		.compatible = "qcom,waipio-mmrm",
+		.data = &commom_pt_platform_data_pineapple,
+	},
+	{
+		.compatible = "qcom,kalama-mmrm",
+		.data = &commom_pt_platform_data_pineapple,
+	},
+	{
+		.compatible = "qcom,pineapple-mmrm",
+		.data = &commom_pt_platform_data_pineapple,
+	},
+	{
+		.compatible = "qcom,cliffs-mmrm",
+		.data = &commom_pt_platform_data_cliffs,
+	},
+	{},
+};
+
+/* Look up the per-SoC platform data bound to the device's DT compatible.
+ *
+ * Return: matching platform data, or NULL when the node matches no entry
+ * in mmrm_dt_match.
+ */
+struct mmrm_platform_data *mmrm_get_platform_data(struct device *dev)
+{
+	struct mmrm_platform_data *platform_data = NULL;
+	const struct of_device_id *match;
+
+	match = of_match_node(mmrm_dt_match, dev->of_node);
+	if (match)
+		platform_data = (struct mmrm_platform_data *)match->data;
+
+	if (!platform_data)
+		goto exit;
+
+	/* add additional config checks for platform data */
+
+exit:
+	return platform_data;
+}
+
+/* One-time MMRM bring-up: resolve the clock-manager ops for the
+ * configured scheme, then initialise the clock resource manager.
+ *
+ * Return: 0 on success, negative errno from the failing step.
+ */
+int mmrm_init(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	/* get clk resource mgr ops */
+	rc = mmrm_get_clk_mgr_ops(drv_data);
+	if (rc) {
+		/* fixed: previously logged the same "init clk mgr failed"
+		 * text as the step below, hiding which step failed */
+		d_mpr_e("%s: get clk mgr ops failed\n", __func__);
+		goto err_exit;
+	}
+
+	/* clock resource mgr */
+	rc = drv_data->clk_mgr_ops->init_clk_mgr(drv_data);
+	if (rc) {
+		d_mpr_e("%s: init clk mgr failed\n", __func__);
+		goto err_exit;
+	}
+
+err_exit:
+	return rc;
+}
+
+/* Tear down the clock resource manager.
+ *
+ * Return: 0 on success, -EINVAL for bad input, or the error code from
+ * destroy_clk_mgr().
+ */
+int mmrm_deinit(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	if (!drv_data || !drv_data->clk_mgr_ops ||
+		!drv_data->clk_mgr_ops->destroy_clk_mgr) {
+		d_mpr_e("%s: invalid driver data or clk mgr ops\n", __func__);
+		return -EINVAL;
+	}
+
+	/* destroy clock resource mgr */
+	rc = drv_data->clk_mgr_ops->destroy_clk_mgr(drv_data->clk_mgr);
+	if (rc) {
+		d_mpr_e("%s: destroy clk mgr failed\n", __func__);
+		/* NOTE(review): clk_mgr is cleared only on failure, leaving a
+		 * possibly stale pointer on success — confirm intent */
+		drv_data->clk_mgr = NULL;
+	}
+
+	return rc;
+}

+ 60 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_internal.h

@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MMRM_INTERNAL_H_
+#define _MMRM_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include "mmrm_resources.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+/* key/value pair from the per-SoC common config table */
+struct mmrm_common_data {
+	char key[128];
+	int value;
+};
+
+/* identifies one clock client eligible for throttling */
+struct mmrm_throttle_clients_data {
+	u32 domain;
+	u32 id;
+};
+
+/* per-SoC configuration bound to a DT compatible string */
+struct mmrm_platform_data {
+	struct mmrm_common_data *common_data;
+	struct mmrm_throttle_clients_data *throttle_clk_clients_data;
+	u32 common_data_length;
+	u16 throttle_clk_clients_data_length;
+	u32 scheme;
+};
+
+/* top-level driver state, allocated in probe */
+struct mmrm_driver_data {
+	/* platform data */
+	struct mmrm_platform_data *platform_data;
+
+	/* clk */
+	bool is_clk_scaling_supported;
+	struct mmrm_clk_platform_resources clk_res;
+	struct mmrm_clk_mgr *clk_mgr;
+	struct mmrm_clk_mgr_ops *clk_mgr_ops;
+
+	/* debugfs */
+	struct dentry *debugfs_root;
+};
+
+struct mmrm_platform_data *mmrm_get_platform_data(struct device *dev);
+
+int mmrm_count_clk_clients_frm_dt(struct platform_device *pdev);
+
+int mmrm_read_platform_resources(
+	struct platform_device *pdev,
+	struct mmrm_driver_data *drv_data);
+int mmrm_free_platform_resources(struct mmrm_driver_data *drv_data);
+
+int mmrm_init(struct mmrm_driver_data *drv_data);
+int mmrm_deinit(struct mmrm_driver_data *drv_data);
+
+#endif //_MMRM_INTERNAL_H_
+

+ 286 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_res_parse.c

@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/of_platform.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+
+/* Linear search of the platform common_data table for @key.
+ *
+ * Return: the stored value, or 0 when the key is absent (0 is therefore
+ * ambiguous with a stored value of 0).
+ */
+static int mmrm_find_key_value(
+	struct mmrm_platform_data *pdata, const char *key)
+{
+	int i = 0;
+	struct mmrm_common_data *cdata = pdata->common_data;
+	int size = pdata->common_data_length;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(cdata[i].key, key))
+			return cdata[i].value;
+	}
+	return 0;
+}
+
+/* Copy scheme selection and the throttle-client list from platform data
+ * into the clk platform resources. Each entry is packed as
+ * (domain << 16 | id) into clsid_threshold_clients[].
+ *
+ * Return: 0 (always succeeds).
+ */
+static int mmrm_read_clk_pltfrm_rsrc_frm_drv_data(
+	struct mmrm_driver_data *ddata)
+{
+	struct mmrm_platform_data *pdata;
+	struct mmrm_clk_platform_resources *cres;
+	u16 count;
+	int i = 0;
+
+	pdata = ddata->platform_data;
+	cres = &ddata->clk_res;
+
+	cres->scheme = mmrm_find_key_value(pdata,
+					"qcom,mmrm_clk_mgr_scheme");
+	d_mpr_h("%s: configured mmrm scheme %d\n",
+		__func__, cres->scheme);
+
+	/* fixed: bound the copy to the fixed-size destination array; an
+	 * oversized platform table would previously overflow it */
+	count = pdata->throttle_clk_clients_data_length;
+	if (count > MMRM_MAX_THROTTLE_CLIENTS) {
+		d_mpr_e("%s: throttle client count %u exceeds max %d, clamping\n",
+			__func__, count, MMRM_MAX_THROTTLE_CLIENTS);
+		count = MMRM_MAX_THROTTLE_CLIENTS;
+	}
+	cres->throttle_clients_data_length = count;
+
+	for (i = 0; i < count; i++) {
+		cres->clsid_threshold_clients[i] =
+			(pdata->throttle_clk_clients_data[i].domain << 16
+				| pdata->throttle_clk_clients_data[i].id);
+	}
+
+	return 0;
+}
+
+/* Drop references to the rail corner table; the memory itself is
+ * devm-allocated and freed with the device.
+ */
+static void mmrm_free_rail_corner_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	cres->corner_set.corner_tbl = NULL;
+	cres->corner_set.count = 0;
+}
+
+/* Parse the parallel DT arrays "mm-rail-corners", "mm-rail-fact-volt",
+ * "scaling-fact-dyn" and "scaling-fact-leak" into corner_set.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int mmrm_load_mm_rail_corner_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0, num_corners = 0, c = 0;
+	struct voltage_corner_set *corners = &cres->corner_set;
+	struct platform_device *pdev = cres->pdev;
+
+	num_corners = of_property_count_strings(pdev->dev.of_node,
+		"mm-rail-corners");
+	if (num_corners <= 0) {
+		d_mpr_e("%s: no mm rail corners found\n",
+			__func__);
+		corners->count = 0;
+		/* NOTE(review): rc is still 0 here, so a missing property is
+		 * reported to the caller as success — confirm intended */
+		goto err_load_corner_tbl;
+	}
+
+	corners->corner_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*corners->corner_tbl) * num_corners, GFP_KERNEL);
+	if (!corners->corner_tbl) {
+		d_mpr_e("%s: failed to allocate memory for corner_tbl\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_corner_tbl;
+	}
+	corners->count = num_corners;
+	d_mpr_h("%s: found %d corners\n",
+		__func__, num_corners);
+
+	/* the four properties are indexed in lock-step per corner */
+	for (c = 0; c < num_corners; c++) {
+		struct corner_info *ci = &corners->corner_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"mm-rail-corners", c, &ci->name);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mm-rail-fact-volt", c, &ci->volt_factor);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"scaling-fact-dyn", c, &ci->scaling_factor_dyn);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"scaling-fact-leak", c, &ci->scaling_factor_leak);
+	}
+
+	/* print corner tables */
+	for (c = 0; c < num_corners; c++) {
+		struct corner_info *ci = &corners->corner_tbl[c];
+
+		d_mpr_h(
+			"%s: corner_name:%s volt_factor: %d sc_dyn: %d sc_leak: %d\n",
+			__func__, ci->name, ci->volt_factor,
+			ci->scaling_factor_dyn, ci->scaling_factor_leak);
+	}
+
+	return 0;
+
+err_load_corner_tbl:
+	return rc;
+}
+
+/* Drop references to the nominal clk source table; memory is
+ * devm-allocated and freed with the device.
+ */
+static void mmrm_free_nom_clk_src_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	cres->nom_clk_set.clk_src_tbl = NULL;
+	cres->nom_clk_set.count = 0;
+}
+
+/* Parse "mmrm-peak-threshold" and the "mmrm-client-info" DT table
+ * (5 u32 cells per client: domain, clk_src_id, nom_dyn_pwr,
+ * nom_leak_pwr, num_hw_block) into @cres.
+ *
+ * Return: 0 on success, negative errno on missing/malformed properties.
+ */
+static int mmrm_load_nom_clk_src_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0, num_clk_src = 0, c = 0, size_clk_src = 0, entry_offset = 5;
+
+	struct platform_device *pdev = cres->pdev;
+	struct nom_clk_src_set *clk_srcs = &cres->nom_clk_set;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "mmrm-peak-threshold", &cres->peak_threshold);
+	if (rc < 0) {
+		d_mpr_e("%s: invalid or missing mmrm-peak-threshold DT property\n", __func__);
+		rc = -ENODEV;
+		goto err_load_clk_src_tbl;
+	}
+
+	d_mpr_h("%s: mmrm-peak-threshold threshold:%d\n",
+		__func__, cres->peak_threshold);
+
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &size_clk_src);
+	if ((size_clk_src < sizeof(*clk_srcs->clk_src_tbl)) ||
+		(size_clk_src % sizeof(*clk_srcs->clk_src_tbl))) {
+		d_mpr_e("%s: invalid size(%d) of clk src table\n",
+			__func__, size_clk_src);
+		clk_srcs->count = 0;
+		/* fixed: rc was still 0 here, so an absent or malformed
+		 * table was silently reported as success to the caller */
+		rc = -EINVAL;
+		goto err_load_clk_src_tbl;
+	}
+
+	clk_srcs->clk_src_tbl = devm_kzalloc(&pdev->dev,
+		size_clk_src, GFP_KERNEL);
+	if (!clk_srcs->clk_src_tbl) {
+		d_mpr_e("%s: failed to allocate memory for clk_src_tbl\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_clk_src_tbl;
+	}
+	num_clk_src = size_clk_src / sizeof(struct nom_clk_src_info);
+	clk_srcs->count = num_clk_src;
+	d_mpr_h("%s: found %d clk_srcs size %d\n",
+		__func__, num_clk_src, size_clk_src);
+
+	/* each table entry spans entry_offset (5) consecutive u32 cells */
+	for (c = 0; c < num_clk_src; c++) {
+		struct nom_clk_src_info *ci = &clk_srcs->clk_src_tbl[c];
+
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset), &ci->domain);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+1), &ci->clk_src_id);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+2),
+			&ci->nom_dyn_pwr);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+3),
+			&ci->nom_leak_pwr);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+4),
+			&ci->num_hw_block);
+	}
+
+	/* print the parsed table */
+	for (c = 0; c < num_clk_src; c++) {
+		struct nom_clk_src_info *ci = &clk_srcs->clk_src_tbl[c];
+
+		d_mpr_h("%s: domain: %d clk_src: %d dyn_pwr: %d leak_pwr: %d\n",
+			__func__, ci->domain, ci->clk_src_id, ci->nom_dyn_pwr,
+			ci->nom_leak_pwr);
+	}
+
+	return 0;
+
+err_load_clk_src_tbl:
+	return rc;
+}
+
+/* Read DT-backed clk platform resources: the rail corner table always,
+ * plus the nominal clk source table for the SW scheme. The CXIPEAK
+ * scheme is explicitly rejected.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mmrm_read_clk_pltfrm_rsrc_frm_dt(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0;
+
+	rc = mmrm_load_mm_rail_corner_table(cres);
+	if (rc) {
+		d_mpr_e("%s: failed to load mm rail corner table\n",
+			__func__);
+		goto err_load_mmrm_rail_table;
+	}
+
+	if (cres->scheme == CLK_MGR_SCHEME_SW) {
+		rc = mmrm_load_nom_clk_src_table(cres);
+		if (rc) {
+			d_mpr_e("%s: failed to load nom clk src table\n",
+				__func__);
+			goto err_load_nom_clk_src_table;
+		}
+	} else if (cres->scheme == CLK_MGR_SCHEME_CXIPEAK) {
+		d_mpr_e("%s: cxipeak is not supported with mmrm\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_load_mmrm_rail_table;
+	}
+
+	return rc;
+
+err_load_nom_clk_src_table:
+	mmrm_free_nom_clk_src_table(cres);
+
+err_load_mmrm_rail_table:
+	mmrm_free_rail_corner_table(cres);
+	return rc;
+}
+
+/* Count clk clients described by the "mmrm-client-info" DT property.
+ * When the property is absent, of_find_property() leaves size_clk_src
+ * at 0 and this returns 0 (probe then takes the skip path).
+ */
+int mmrm_count_clk_clients_frm_dt(struct platform_device *pdev)
+{
+	u32 size_clk_src = 0, num_clk_src = 0;
+
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &size_clk_src);
+	num_clk_src = size_clk_src / sizeof(struct nom_clk_src_info);
+	d_mpr_h("%s: found %d clk_srcs size %d\n",
+		__func__, num_clk_src, size_clk_src);
+
+	return num_clk_src;
+}
+
+/* Populate drv_data->clk_res from driver platform data and then from DT.
+ *
+ * Return: 0 on success, -EINVAL when the device has no OF node, or the
+ * error from either read step.
+ */
+int mmrm_read_platform_resources(struct platform_device *pdev,
+	struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	if (pdev->dev.of_node) {
+
+		/* clk resources */
+		drv_data->clk_res.pdev = pdev;
+
+		rc = mmrm_read_clk_pltfrm_rsrc_frm_drv_data(drv_data);
+		if (rc) {
+			d_mpr_e(
+				"%s: failed to read clk platform res from driver\n",
+				__func__);
+			goto exit;
+		}
+		rc = mmrm_read_clk_pltfrm_rsrc_frm_dt(&drv_data->clk_res);
+		if (rc) {
+			d_mpr_e("%s: failed to read clk platform res from dt\n",
+				__func__);
+			goto exit;
+		}
+	} else {
+		d_mpr_e("%s: of node is null\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+/* Release clk platform resources (tables are devm-backed; this only
+ * clears the references). Always returns 0.
+ */
+int mmrm_free_platform_resources(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	/* free clk resources */
+	mmrm_free_nom_clk_src_table(&drv_data->clk_res);
+	mmrm_free_rail_corner_table(&drv_data->clk_res);
+
+	return rc;
+}

+ 47 - 0
qcom/opensource/mmrm-driver/driver/src/mmrm_resources.h

@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MMRM_RESOURCES_H_
+#define _MMRM_RESOURCES_H_
+
+#include <linux/platform_device.h>
+#define MMRM_MAX_THROTTLE_CLIENTS 5
+
+/* one voltage corner parsed from the "mm-rail-corners" DT arrays */
+struct corner_info {
+	const char *name;
+	u32 volt_factor;
+	u32 scaling_factor_dyn;
+	u32 scaling_factor_leak;
+};
+
+struct voltage_corner_set {
+	struct corner_info *corner_tbl;
+	u32 count;
+};
+
+/* one entry (5 u32 cells) of the "mmrm-client-info" DT table */
+struct nom_clk_src_info {
+	u32 domain;
+	u32 clk_src_id;
+	u32 nom_dyn_pwr;
+	u32 nom_leak_pwr;
+	u32 num_hw_block;
+};
+
+struct nom_clk_src_set {
+	struct nom_clk_src_info *clk_src_tbl;
+	u32 count;
+};
+
+struct mmrm_clk_platform_resources {
+	struct platform_device *pdev;
+	u32 scheme;
+	/* entries are packed as (domain << 16 | client id) */
+	u32 clsid_threshold_clients[MMRM_MAX_THROTTLE_CLIENTS];
+	u16 throttle_clients_data_length;
+	struct voltage_corner_set corner_set;
+	struct nom_clk_src_set nom_clk_set;
+	u32 peak_threshold;
+};
+
+#endif

+ 617 - 0
qcom/opensource/mmrm-driver/driver/src/msm_mmrm.c

@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+#include <linux/fs.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+#define	VERIFY_PDEV(pdev)	\
+{							\
+	if (!pdev) {			\
+		d_mpr_e("%s: null platform dev\n", __func__);\
+		rc = -EINVAL;		\
+		goto err_exit; \
+	}						\
+}
+
+#define RESET_DRV_DATA(drv_data)	\
+{									\
+	kfree(drv_data);				\
+	drv_data = (void *) -EPROBE_DEFER; \
+}
+
+#define CHECK_SKIP_MMRM_CLK_RSRC(drv_data)	\
+{									\
+	if (!drv_data->is_clk_scaling_supported) {	\
+		d_mpr_h("%s: mmrm clk rsrc not supported\n", __func__);\
+		goto skip_mmrm;				\
+	}								\
+}
+
+#define	MMRM_SYSFS_ENTRY_MAX_LEN PAGE_SIZE
+
+extern int msm_mmrm_debug;
+extern u8 msm_mmrm_enable_throttle_feature;
+extern u8 msm_mmrm_allow_multiple_register;
+
+struct mmrm_driver_data *drv_data = (void *) -EPROBE_DEFER;
+
+/* Report whether MMRM manages scaling for @client_type/@client_domain.
+ * Returns false before probe completes or when clk scaling is disabled.
+ */
+bool mmrm_client_check_scaling_supported(enum mmrm_client_type client_type, u32 client_domain)
+{
+	if (drv_data == (void *)-EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	if (client_type == MMRM_CLIENT_CLOCK) {
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		/* TODO: Check for individual domain */
+	}
+
+	return true;
+err_exit:
+	d_mpr_e("%s: error exit\n", __func__);
+skip_mmrm:
+	return false;
+}
+EXPORT_SYMBOL(mmrm_client_check_scaling_supported);
+
+/* Register a client with MMRM. Only MMRM_CLIENT_CLOCK is supported.
+ *
+ * Return: client handle, or NULL on bad input, before probe completes,
+ * when clk scaling is disabled, or on registration failure.
+ */
+struct mmrm_client *mmrm_client_register(struct mmrm_client_desc *client_desc)
+{
+	struct mmrm_client *client = NULL;
+
+	/* check for null input */
+	if (!client_desc) {
+		d_mpr_e("%s: null input descriptor\n", __func__);
+		goto err_exit;
+	}
+
+	if (drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	/* check for client type, then register */
+	if (client_desc->client_type == MMRM_CLIENT_CLOCK) {
+		/* check for skip mmrm */
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		client = mmrm_clk_client_register(
+					drv_data->clk_mgr, client_desc);
+		if (!client) {
+			d_mpr_e("%s: failed to register client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client_desc->client_type);
+		goto err_exit;
+	}
+
+skip_mmrm:
+	return client;
+
+err_exit:
+	d_mpr_e("%s: error exit\n", __func__);
+	return client;
+}
+EXPORT_SYMBOL(mmrm_client_register);
+
+/* Deregister a previously registered clock client.
+ *
+ * Return: 0 on success (also on the skip path), -EINVAL for bad input,
+ * or the clk manager's error code.
+ */
+int mmrm_client_deregister(struct mmrm_client *client)
+{
+	int rc = 0;
+
+	/* check for null input */
+	if (!client) {
+		d_mpr_e("%s: invalid input client\n", __func__);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	/* check for client type, then deregister */
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		/* check for skip mmrm */
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		rc = mmrm_clk_client_deregister(drv_data->clk_mgr, client);
+		if (rc != 0) {
+			d_mpr_e("%s: failed to deregister client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+skip_mmrm:
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_deregister);
+
+/* Request clock value @val for a clock client.
+ *
+ * Return: 0 on success (also on the skip path), -EINVAL for bad input,
+ * or the clk manager's error code.
+ */
+int mmrm_client_set_value(struct mmrm_client *client,
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	int rc = 0;
+
+	/* check for null input */
+	if (!client || !client_data) {
+		d_mpr_e("%s: invalid input client(%pK) client_data(%pK)\n",
+			__func__, client, client_data);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	/* check for client type, then set value */
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		/* check for skip mmrm */
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		rc = mmrm_clk_client_setval(drv_data->clk_mgr, client,
+				client_data, val);
+		if (rc != 0) {
+			d_mpr_e("%s: failed to set value for client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+skip_mmrm:
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value);
+
+/* Request a clock value range for a clock client.
+ *
+ * Return: 0 on success (also on the skip path), -EINVAL for bad input,
+ * or the clk manager's error code.
+ */
+int mmrm_client_set_value_in_range(struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+
+	/* check for null input */
+	if (!client || !client_data || !val) {
+		d_mpr_e(
+			"%s: invalid input client(%pK) client_data(%pK) val(%pK)\n",
+			__func__, client, client_data, val);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	/* check for client type, then set value */
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		/* check for skip mmrm */
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		rc = mmrm_clk_client_setval_inrange(drv_data->clk_mgr,
+				client, client_data, val);
+		if (rc != 0) {
+			d_mpr_e("%s: failed to set value for client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+skip_mmrm:
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value_in_range);
+
+/* Query the current value for a clock client into @val.
+ *
+ * Return: 0 on success (also on the skip path), -EINVAL for bad input,
+ * or the clk manager's error code.
+ */
+int mmrm_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+
+	/* check for null input */
+	if (!client || !val) {
+		d_mpr_e("%s: invalid input client(%pK) val(%pK)\n",
+			__func__, client, val);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	/* check for client type, then get value */
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		/* check for skip mmrm */
+		CHECK_SKIP_MMRM_CLK_RSRC(drv_data);
+
+		rc = mmrm_clk_client_getval(drv_data->clk_mgr,
+				client, val);
+		if (rc != 0) {
+			d_mpr_e("%s: failed to get value for client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+skip_mmrm:
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_get_value);
+
+/* Return the total number of clk clients known to the SW clk manager,
+ * or 0 before probe completes or when clk scaling is not supported.
+ */
+int mmrm_client_get_clk_count(void)
+{
+	struct mmrm_sw_clk_mgr_info *sinfo;
+
+	if (drv_data == (void *) -EPROBE_DEFER)
+		return 0;
+
+	/* fixed: clk_mgr is NULL when probe took the skip_mmrm path
+	 * (no clk clients in DT); dereferencing it would crash */
+	if (!drv_data->is_clk_scaling_supported || !drv_data->clk_mgr)
+		return 0;
+
+	sinfo = &(drv_data->clk_mgr->data.sw_info);
+
+	return sinfo->tot_clk_clients;
+}
+EXPORT_SYMBOL(mmrm_client_get_clk_count);
+
+/* Parse a u32 from a sysfs write buffer; accepts decimal or "0x"/"0X"
+ * hexadecimal input.
+ *
+ * Return: 0 on success (or when @buf is NULL, leaving *param untouched),
+ * -EINVAL on parse failure.
+ */
+static int sysfs_get_param(const char *buf, u32 *param)
+{
+	int base;
+
+	if (buf) {
+		/* fixed: guard buf[0] before inspecting buf[1] so an empty
+		 * string cannot be read past its terminator */
+		if (buf[0] && (buf[1] == 'x' || buf[1] == 'X'))
+			base = 16;
+		else
+			base = 10;
+
+		if (kstrtou32(buf, base, param) != 0)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/* sysfs show: current debug level bitmask (see enum mmrm_msg_prio). */
+static ssize_t mmrm_sysfs_debug_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	ret = scnprintf(buf, MMRM_SYSFS_ENTRY_MAX_LEN, "0x%x\n", msm_mmrm_debug);
+	pr_info("%s: 0x%04X\n", __func__, msm_mmrm_debug);
+
+	return ret;
+}
+
+/* sysfs store: set the debug level bitmask; silently ignores bad input. */
+static ssize_t mmrm_sysfs_debug_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	u32 reg_addr;
+
+	ret = sysfs_get_param(buf, &reg_addr);
+	if (ret == 0)
+		msm_mmrm_debug = reg_addr;
+
+	return count;
+}
+
+/* sysfs show: throttle-feature enable flag. */
+static ssize_t mmrm_sysfs_enable_throttle_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	ret = scnprintf(buf, MMRM_SYSFS_ENTRY_MAX_LEN, "0x%x\n", msm_mmrm_enable_throttle_feature);
+	pr_info("%s: 0x%04X\n", __func__, msm_mmrm_enable_throttle_feature);
+
+	return ret;
+}
+
+/* sysfs store: set the throttle-feature flag; silently ignores bad input. */
+static ssize_t mmrm_sysfs_enable_throttle_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	u32 reg_addr;
+	int ret;
+
+	ret = sysfs_get_param(buf, &reg_addr);
+	if (ret == 0)
+		msm_mmrm_enable_throttle_feature = (u8)reg_addr;
+
+	return count;
+}
+
+/* sysfs show: allow-multiple-register flag. */
+static ssize_t mmrm_sysfs_allow_multiple_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	ret = scnprintf(buf, MMRM_SYSFS_ENTRY_MAX_LEN, "0x%x\n", msm_mmrm_allow_multiple_register);
+	pr_info("%s: 0x%04X\n", __func__, msm_mmrm_allow_multiple_register);
+
+	return ret;
+}
+
+/* sysfs store: set the allow-multiple-register flag; ignores bad input. */
+static ssize_t mmrm_sysfs_allow_multiple_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	u32 reg_addr;
+	int ret;
+
+	ret = sysfs_get_param(buf, &reg_addr);
+	if (ret == 0)
+		msm_mmrm_allow_multiple_register = (u8)reg_addr;
+
+	return count;
+}
+
+
+/* sysfs show: delegate to the clk manager to print enabled clients.
+ * Only reachable after a successful probe (the attribute group is
+ * created only on the non-skip path), so drv_data/clk_mgr are valid.
+ */
+static ssize_t dump_enabled_client_info_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int rc;
+
+	rc = mmrm_clk_print_enabled_client_info(drv_data->clk_mgr, buf, MMRM_SYSFS_ENTRY_MAX_LEN);
+	if (rc == 0)
+		d_mpr_e("%s: failed to dump client info\n", __func__);
+
+	return rc;
+}
+
+/* sysfs show: dump the peak threshold and the nominal clk source table,
+ * bounded by the remaining space in the one-page sysfs buffer.
+ */
+static ssize_t dump_clk_res_info_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int    i, len;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct nom_clk_src_set *clk_set = &cres->nom_clk_set;
+	struct nom_clk_src_info *pclk;
+	int left_spaces = MMRM_SYSFS_ENTRY_MAX_LEN;
+
+	len = scnprintf(buf, left_spaces, "threshold: %d\n",
+		cres->peak_threshold);
+	left_spaces -= len;
+	buf += len;
+
+	/* stop once less than 2 bytes remain (scnprintf needs room for NUL) */
+	for (i = 0, pclk = clk_set->clk_src_tbl; i < clk_set->count && left_spaces > 1; i++, pclk++) {
+		len = scnprintf(buf, left_spaces, "%d\t%d\t% 8d\t%d\t%d\n",
+			pclk->domain,
+			pclk->clk_src_id,
+			pclk->nom_dyn_pwr,
+			pclk->nom_leak_pwr,
+			pclk->num_hw_block);
+		left_spaces -= len;
+		buf += len;
+	}
+
+	return MMRM_SYSFS_ENTRY_MAX_LEN - left_spaces;
+}
+
+static DEVICE_ATTR(debug, 0644,
+		mmrm_sysfs_debug_get,
+		mmrm_sysfs_debug_set);
+
+static DEVICE_ATTR(enable_throttle_feature, 0644,
+		mmrm_sysfs_enable_throttle_get,
+		mmrm_sysfs_enable_throttle_set);
+
+static DEVICE_ATTR(allow_multiple_register, 0644,
+		mmrm_sysfs_allow_multiple_get,
+		mmrm_sysfs_allow_multiple_set);
+
+static DEVICE_ATTR_RO(dump_enabled_client_info);
+static DEVICE_ATTR_RO(dump_clk_res_info);
+
+
+static struct attribute *mmrm_fs_attrs[] = {
+		&dev_attr_debug.attr,
+		&dev_attr_enable_throttle_feature.attr,
+		&dev_attr_allow_multiple_register.attr,
+		&dev_attr_dump_enabled_client_info.attr,
+		&dev_attr_dump_clk_res_info.attr,
+		NULL,
+};
+
+static struct attribute_group mmrm_fs_attrs_group = {
+		.attrs = mmrm_fs_attrs,
+};
+
+/* Allocate driver state and bring MMRM up: count DT clk clients, load
+ * platform data and resources, init the clock manager, and expose
+ * debugfs and sysfs controls. When no clk clients need admission
+ * control, the skip path returns success with clk scaling disabled.
+ *
+ * Return: 0 on success, negative errno on failure (drv_data is reset to
+ * the EPROBE_DEFER sentinel on every error path).
+ */
+static int msm_mmrm_probe_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 clk_clients = 0;
+
+	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data) {
+		d_mpr_e("%s: unable to allocate memory for mmrm driver\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	/* check for clk clients needing admission control */
+	clk_clients = mmrm_count_clk_clients_frm_dt(pdev);
+	if (clk_clients) {
+		d_mpr_h("%s: %d clk clients managed for admission control\n",
+			__func__, clk_clients);
+		drv_data->is_clk_scaling_supported = true;
+	} else {
+		d_mpr_h("%s: no clk clients managed for admission control\n",
+			__func__);
+		drv_data->is_clk_scaling_supported = false;
+		goto skip_mmrm;
+	}
+
+	drv_data->platform_data = mmrm_get_platform_data(&pdev->dev);
+	if (!drv_data->platform_data) {
+		d_mpr_e("%s: unable to get platform data\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_get_drv_data;
+	}
+
+	drv_data->debugfs_root = msm_mmrm_debugfs_init();
+	if (!drv_data->debugfs_root)
+		d_mpr_e("%s: failed to create debugfs for mmrm\n", __func__);
+
+	dev_set_drvdata(&pdev->dev, drv_data);
+
+	rc = mmrm_read_platform_resources(pdev, drv_data);
+	if (rc) {
+		d_mpr_e("%s: unable to read platform resources for mmrm\n",
+			__func__);
+		goto err_read_pltfrm_rsc;
+	}
+
+	rc = mmrm_init(drv_data);
+	if (rc) {
+		d_mpr_e("%s: failed to init mmrm\n",
+			__func__);
+		goto err_mmrm_init;
+	}
+
+	if (sysfs_create_group(&pdev->dev.kobj, &mmrm_fs_attrs_group)) {
+		d_mpr_e("%s: failed to create sysfs\n",
+			__func__);
+	}
+
+skip_mmrm:
+	return rc;
+
+err_mmrm_init:
+err_read_pltfrm_rsc:
+	/* fixed: the read-resources failure path previously skipped the
+	 * debugfs teardown and leaked the tree; deinit tolerates NULL */
+	msm_mmrm_debugfs_deinit(drv_data->debugfs_root);
+	mmrm_free_platform_resources(drv_data);
+err_get_drv_data:
+	RESET_DRV_DATA(drv_data);
+err_no_mem:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+/* Platform driver probe: validate pdev (VERIFY_PDEV jumps to err_exit on
+ * NULL) and dispatch to msm_mmrm_probe_init() for the matching node.
+ */
+static int msm_mmrm_probe(struct platform_device *pdev)
+{
+	int rc = -EINVAL;
+
+	d_mpr_h("%s\n", __func__);
+
+	VERIFY_PDEV(pdev)
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-mmrm"))
+		return msm_mmrm_probe_init(pdev);
+
+	d_mpr_e("%s: no compatible device node\n", __func__);
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+/* Platform driver remove: tear down sysfs/debugfs/clk mgr (only created
+ * when clk scaling is supported) and reset the global driver data.
+ * NOTE(review): on the probe skip path dev_set_drvdata() was never
+ * called, so remove returns -EINVAL and the allocated drv_data is not
+ * freed here — confirm against the probe flow.
+ */
+static int msm_mmrm_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	VERIFY_PDEV(pdev);
+
+	drv_data = dev_get_drvdata(&pdev->dev);
+	if (!drv_data) {
+		d_mpr_e("%s: null driver data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (drv_data->is_clk_scaling_supported) {
+		sysfs_remove_group(&pdev->dev.kobj, &mmrm_fs_attrs_group);
+		msm_mmrm_debugfs_deinit(drv_data->debugfs_root);
+		mmrm_deinit(drv_data);
+		mmrm_free_platform_resources(drv_data);
+	}
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	RESET_DRV_DATA(drv_data);
+
+	return rc;
+
+err_exit:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+static const struct of_device_id msm_mmrm_dt_match[] = {
+	{.compatible = "qcom,msm-mmrm"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_mmrm_dt_match);
+
+static struct platform_driver msm_mmrm_driver = {
+	.probe = msm_mmrm_probe,
+	.remove = msm_mmrm_remove,
+	.driver = {
+		.name = "msm-mmrm",
+		.of_match_table = msm_mmrm_dt_match,
+	},
+};
+
+/* Module entry point: register the platform driver. */
+static int __init msm_mmrm_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&msm_mmrm_driver);
+	if (rc) {
+		d_mpr_e("%s: failed to register platform driver\n",
+			__func__);
+		goto err_platform_drv_reg;
+	}
+
+	d_mpr_h("%s: success\n", __func__);
+	return rc;
+
+err_platform_drv_reg:
+	d_mpr_e("%s: error = %d\n", __func__, rc);
+	return rc;
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit msm_mmrm_exit(void)
+{
+	platform_driver_unregister(&msm_mmrm_driver);
+}
+
+module_init(msm_mmrm_init);
+module_exit(msm_mmrm_exit);
+
+MODULE_DESCRIPTION("QTI MMRM Driver");
+MODULE_LICENSE("GPL v2");

+ 19 - 0
qcom/opensource/mmrm-driver/mmrm_kernel_board.mk

@@ -0,0 +1,19 @@
+# Enable the MMRM DLKM unless kernel DLKMs are globally disabled;
+# a per-target override can re-enable it even in that case.
+TARGET_MMRM_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_MMRM_OVERRIDE),true)
+		TARGET_MMRM_ENABLE := true
+	endif
+else
+TARGET_MMRM_ENABLE := true
+endif
+
+# Build mmrm kernel driver
+ifeq ($(TARGET_MMRM_ENABLE),true)
+ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+# Install the module in vendor, first-stage ramdisk and recovery load list
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm-mmrm.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm-mmrm.ko
+BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm-mmrm.ko
+
+# BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/mmrm_test_module.ko
+endif
+endif

+ 12 - 0
qcom/opensource/mmrm-driver/mmrm_kernel_product.mk

@@ -0,0 +1,12 @@
+# Same enable logic as mmrm_kernel_board.mk: DLKM on unless globally
+# disabled without a per-target MMRM override.
+TARGET_MMRM_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_MMRM_OVERRIDE),true)
+		TARGET_MMRM_ENABLE := true
+	endif
+else
+TARGET_MMRM_ENABLE := true
+endif
+
+# Package the module into the product when enabled
+ifeq ($(TARGET_MMRM_ENABLE),true)
+PRODUCT_PACKAGES += msm-mmrm.ko
+endif

+ 22 - 0
qcom/opensource/mmrm-driver/mmrm_modules.bzl

@@ -0,0 +1,22 @@
+load(":mmrm_modules_build.bzl", "mmrm_driver_modules_entry")
+
+MMRM_DRV_PATH = "driver"
+
+# Module registry shared by all targets; every module built from this repo
+# is registered here and later materialized by define_target_variant_modules.
+mmrm_driver_modules = mmrm_driver_modules_entry([":mmrm_driver_headers"])
+module_entry = mmrm_driver_modules.register
+
+#--------------- MM-DRIVERS MODULES ------------------
+
+# msm-mmrm.ko: the core MMRM driver, gated on CONFIG_MSM_MMRM.
+module_entry(
+    name = "msm-mmrm",
+    path = MMRM_DRV_PATH + "/src",
+    config_option = "CONFIG_MSM_MMRM",
+    srcs = [
+        "mmrm_clk_rsrc_mgr.c",
+        "mmrm_clk_rsrc_mgr_sw.c",
+        "mmrm_debug.c",
+        "mmrm_internal.c",
+        "mmrm_res_parse.c",
+        "msm_mmrm.c"
+    ],
+)

+ 83 - 0
qcom/opensource/mmrm-driver/mmrm_modules_build.bzl

@@ -0,0 +1,83 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+
+def _register_module_to_map(module_map, name, path, config_option, srcs, deps):
+    """Record one module definition (as an immutable struct) in module_map,
+    keyed by its name. Re-registering the same name overwrites silently."""
+
+    module = struct(
+        name = name,
+        path = path,
+        srcs = srcs,
+        config_option = config_option,
+        deps = deps,
+    )
+
+    module_map[name] = module
+
+def _get_kernel_build_options(modules, config_options):
+    """Collect the config options to define as a {name: True} dict:
+    the explicit config_options plus each module's own config_option."""
+    opts = {opt: True for opt in config_options}
+    for mod in modules:
+        if mod.config_option:
+            opts[mod.config_option] = True
+    return opts
+
+def _get_kernel_build_module_srcs(module, options, formatter):
+    """Expand a module's source list into globbed, path-prefixed srcs.
+
+    Each src may contain %b/%t placeholders expanded by `formatter`.
+    `options` is accepted for signature compatibility but unused here.
+    (A leftover debug print() that ran on every Bazel load was removed.)
+    """
+    module_path = "{}/".format(module.path) if module.path else ""
+    return native.glob(["{}{}".format(module_path, formatter(src)) for src in module.srcs])
+
+def mmrm_driver_modules_entry(hdrs = []):
+    """Create a module registry closed over `hdrs` (extra header deps).
+
+    Returns a struct exposing register()/get() over a private module_map.
+    """
+    module_map = {}
+
+    def register(name, path = None, config_option = None, srcs = [], deps = []):
+        _register_module_to_map(module_map, name, path, config_option, srcs, deps)
+    return struct(
+        register = register,
+        get = module_map.get,
+        hdrs = hdrs,
+        module_map = module_map
+    )
+
+def define_target_variant_modules(target, variant, registry, modules, config_options = []):
+    """Define ddk_submodule/ddk_module/dist rules for one target+variant.
+
+    target/variant: e.g. "pineapple"/"gki"; registry: a
+    mmrm_driver_modules_entry() struct; modules: names to build.
+    """
+    kernel_build = "{}_{}".format(target, variant)
+    kernel_build_label = "//msm-kernel:{}".format(kernel_build)
+    modules = [registry.get(module_name) for module_name in modules]
+    options = _get_kernel_build_options(modules, config_options)
+    # NOTE(review): build_print is defined but never used below.
+    build_print = lambda message : print("{}: {}".format(kernel_build, message))
+    # %b -> "<target>_<variant>", %t -> "<target>" in src paths
+    formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target)
+    all_module_rules = []
+
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build, module.name)
+        module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
+
+        # skip modules whose sources don't exist for this target
+        if not module_srcs:
+            continue
+
+        ddk_submodule(
+            name = rule_name,
+            srcs = module_srcs,
+            out = "{}.ko".format(module.name),
+            deps = ["//msm-kernel:all_headers"] + registry.hdrs,
+            local_defines = options.keys()
+        )
+        all_module_rules.append(rule_name)
+
+    # one umbrella ddk_module per kernel build, aggregating all submodules
+    ddk_module(
+        name = "{}_mmrm_driver".format(kernel_build),
+        kernel_build = kernel_build_label,
+        deps = all_module_rules,
+    )
+    # stage the .ko files into the product dlkm output directory
+    copy_to_dist_dir(
+        name = "{}_mmrm_driver_dist".format(kernel_build),
+        data = [":{}_mmrm_driver".format(kernel_build)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+def define_consolidate_gki_modules(target, registry, modules, config_options = []):
+    """Define the module set for both the consolidate and gki variants."""
+    define_target_variant_modules(target, "consolidate", registry, modules, config_options)
+    define_target_variant_modules(target, "gki", registry, modules, config_options)

+ 11 - 0
qcom/opensource/mmrm-driver/target.bzl

@@ -0,0 +1,11 @@
+load(":mmrm_modules.bzl", "mmrm_driver_modules")
+load(":mmrm_modules_build.bzl", "define_consolidate_gki_modules")
+
+# Target hook for the pineapple platform: builds msm-mmrm for both the
+# consolidate and gki kernel variants.
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        registry = mmrm_driver_modules,
+        modules = [
+            "msm-mmrm"
+        ],
+)

+ 9 - 0
qcom/opensource/mmrm-driver/vm/be/Kbuild

@@ -0,0 +1,9 @@
+# Build the MMRM VM back-end module only when enabled in the config.
+ifeq ($(CONFIG_MSM_MMRM_VM),y)
+LINUXINCLUDE += -I$(MMRM_ROOT)/vm/be/src -I$(MMRM_ROOT)/driver/src
+
+obj-m += mmrm_vm_be.o
+# note: mmrm_vm_debug.o is shared with the FE via a relative common path
+mmrm_vm_be-objs := src/mmrm_vm_be_main.o \
+		src/mmrm_vm_be_dispatch.o \
+		src/mmrm_vm_be_msgq.o \
+		../common/src/mmrm_vm_debug.o
+endif

+ 32 - 0
qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be.h

@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _MMRM_VM_BE_H_
+#define _MMRM_VM_BE_H_
+
+#include <linux/soc/qcom/msm_mmrm.h>
+#include <mmrm_vm_interface.h>
+
+/*
+ * mmrm_vm_be_recv -- called by the BE message-receiving thread to hand a
+ *                    received request packet to the BE dispatcher
+ * @mmrm_vm: device driver info
+ * @data: message pointer
+ * @size: message size
+ */
+int mmrm_vm_be_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size);
+
+/*
+ * mmrm_vm_be_send_response -- called by the BE message-receiving thread to
+ *                             send an API-call result back to the FE
+ * @mmrm_vm: specific device driver info
+ * @msg: response message packet to send
+ */
+int mmrm_vm_be_send_response(struct mmrm_vm_driver_data *mmrm_vm, void *msg);
+
+
+#endif /* _MMRM_VM_BE_H_ */
+
+

+ 9 - 0
qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be.rc

@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+#
+
+on boot
+	insmod /vendor/lib/modules/mmrm_vm_be.ko
+
+

+ 292 - 0
qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_dispatch.c

@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "mmrm_vm_debug.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_be.h"
+
+/**
+ * mmrm_vm_be_send_response - send response to FE
+ * @mmrm_vm: driver private data
+ * @msg: response message packet (header size field must already be set)
+ *
+ * Fills in the transport header fields, then sends header + payload over
+ * the gunyah message queue.
+ *
+ * Return: 0 on success, negative errno from mmrm_vm_msgq_send() otherwise.
+ */
+
+int mmrm_vm_be_send_response(struct mmrm_vm_driver_data *mmrm_vm, void *msg)
+{
+	struct mmrm_vm_response_msg_pkt *ppkt = (struct mmrm_vm_response_msg_pkt *)msg;
+	struct mmrm_vm_msg_hdr *hdr = &ppkt->hdr;
+	size_t msg_size = sizeof(*hdr) + hdr->size;
+	int rc;
+
+	hdr->version = MMRM_VM_VER_1;
+	hdr->type = MMRM_VM_TYPE_DATA;
+	hdr->flags = 0;
+
+	rc = mmrm_vm_msgq_send(mmrm_vm, msg, msg_size);
+	/* %zu: msg_size is size_t (the old %d was a format mismatch) */
+	d_mpr_l("%s: size:%zu rc=%d\n", __func__, msg_size, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_register - call mmrm API to register client
+ * @mmrm_vm: driver private data
+ * @req: request parameters (carries the client descriptor from the FE)
+ *
+ * Unpacks the FE request into a local mmrm_client_desc, registers the
+ * client with the core mmrm driver, caches the handle in clk_client_tbl
+ * indexed by client_uid, and replies with that uid (U32_MAX on failure).
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_register(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_api_request_msg *req)
+{
+	struct mmrm_client *pClient;
+	int rc;
+	struct mmrm_vm_response_msg_pkt pkt;
+	struct mmrm_client_desc client_desc;
+
+	// unpacketizing the call from fe on SVM
+	client_desc.client_type = req->data.reg.client_type;
+	memcpy(&(client_desc.client_info.desc), &(req->data.reg.desc),
+				sizeof(client_desc.client_info.desc));
+	client_desc.priority = req->data.reg.priority;
+
+	d_mpr_l("%s: register type:%d priority:%d\n", __func__,
+			client_desc.client_type, client_desc.priority);
+	d_mpr_l("%s: domain:%d client ID:%d\n", __func__,
+			client_desc.client_info.desc.client_domain,
+			client_desc.client_info.desc.client_id);
+	d_mpr_l("%s: clk name:%s\n", __func__, client_desc.client_info.desc.name);
+
+	// call mmrm register function
+	pClient = mmrm_client_register(&client_desc);
+	if (pClient != NULL) {
+		/* cache handle by uid; FE will address the client by this uid */
+		mmrm_vm->clk_client_tbl[pClient->client_uid] = pClient;
+		pkt.msg.data.reg.client_id = pClient->client_uid;
+	} else {
+		/* U32_MAX signals registration failure to the FE */
+		pkt.msg.data.reg.client_id = U32_MAX;
+		d_mpr_e("%s: client:%p client id:%d\n", __func__, pClient, pkt.msg.data.reg.client_id);
+	}
+
+	// prepare response packet & send to fe on SVM
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_REGISTER;
+	pkt.msg.hd.seq_no = req->hd.seq_no;
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.reg);
+
+	d_mpr_l("%s: cmd_id:%d data size:%d\n", __func__, pkt.msg.hd.cmd_id, pkt.hdr.size);
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_setvalue - call mmrm API to set client values
+ * @mmrm_vm: driver private data
+ * @req: set client value request parameters
+ *
+ * Looks up the cached client handle by the FE-supplied client_id, applies
+ * the requested value, and replies with the mmrm_client_set_value()
+ * result in data.setval.val.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_setvalue(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_api_request_msg *req)
+{
+	struct mmrm_vm_response_msg_pkt pkt_resp;
+	int rc;
+	struct mmrm_vm_setvalue_request *req_param = &req->data.setval;
+
+	// call mmrm client set value function, and fill response packet
+
+	rc = mmrm_client_set_value(mmrm_vm->clk_client_tbl[req_param->client_id],
+			&req_param->data, req_param->val);
+
+	if (rc != 0) {
+		d_mpr_e("%s: set value rc:%d client id:%d\n", __func__, rc, req_param->client_id);
+	}
+	// prepare response packet & send to fe on SVM
+	pkt_resp.msg.hd.cmd_id = MMRM_VM_RESPONSE_SETVALUE;
+	pkt_resp.msg.hd.seq_no = req->hd.seq_no;
+	pkt_resp.hdr.size = sizeof(pkt_resp.msg.hd) + sizeof(pkt_resp.msg.data.setval);
+
+	/* the API result is returned to the FE in the val field */
+	pkt_resp.msg.data.setval.val = rc;
+
+	d_mpr_l("%s: cmd_id:%d data size:%d\n", __func__,
+		pkt_resp.msg.hd.cmd_id, pkt_resp.hdr.size);
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt_resp);
+
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_setvalue_inrange - call mmrm API to set client range values
+ * @mmrm_vm: driver private data
+ * @req: set client value request parameters
+ *
+ * Forwards the range request to mmrm_client_set_value_in_range() and
+ * replies to the FE with the API result in setval_range.ret_code.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_setvalue_inrange(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_api_request_msg *req)
+{
+	struct mmrm_vm_response_msg_pkt pkt;
+	int rc;
+	struct mmrm_vm_setvalue_inrange_request *req_param = &req->data.setval_range;
+
+	rc = mmrm_client_set_value_in_range(mmrm_vm->clk_client_tbl[req_param->client_id],
+		&req_param->data, &req_param->val);
+
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_SETVALUE_INRANGE;
+	pkt.msg.hd.seq_no = req->hd.seq_no;
+	pkt.msg.data.setval_range.ret_code = rc;
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.setval_range);
+
+	d_mpr_l("%s: cmd_id:%d data size:%d\n", __func__, pkt.msg.hd.cmd_id, pkt.hdr.size);
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_getvalue - call mmrm API to get client values
+ * @mmrm_vm: driver private data
+ * @req: get client value request parameters
+ *
+ * Queries the current/min/max values for the client identified by the
+ * FE-supplied client_id and copies them into the response payload.
+ * NOTE(review): the mmrm_client_get_value() return code is not forwarded
+ * to the FE; on failure the FE receives whatever `val` holds — confirm
+ * this is intended.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_getvalue(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_api_request_msg *req)
+{
+	struct mmrm_vm_response_msg_pkt pkt;
+	int rc;
+	struct mmrm_vm_getvalue_request *req_param = &req->data.getval;
+	struct mmrm_client_res_value val;
+	struct mmrm_client_res_value *p_val = &pkt.msg.data.getval.val;
+
+	rc = mmrm_client_get_value(mmrm_vm->clk_client_tbl[req_param->client_id], &val);
+
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_GETVALUE;
+	pkt.msg.hd.seq_no = req->hd.seq_no;
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.getval);
+
+	/* copy the three resource values into the response payload */
+	p_val->cur = val.cur;
+	p_val->max = val.max;
+	p_val->min = val.min;
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_deregister - call mmrm API to deregister client
+ * @mmrm_vm: driver private data
+ * @req: deregister request parameters
+ *
+ * Deregisters the cached client handle and replies to the FE with the
+ * mmrm_client_deregister() result in dereg.ret_code.
+ * NOTE(review): the stale handle is left in clk_client_tbl after a
+ * successful deregister — confirm the FE never reuses the old id.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_deregister(struct mmrm_vm_driver_data *mmrm_vm,
+		struct mmrm_vm_api_request_msg *req)
+{
+	int rc;
+	struct mmrm_vm_response_msg_pkt pkt;
+	struct mmrm_vm_deregister_request *req_param = &req->data.dereg;
+
+	rc = mmrm_client_deregister(mmrm_vm->clk_client_tbl[req_param->client_id]);
+
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_DEREGISTER;
+	pkt.msg.hd.seq_no = req->hd.seq_no;
+
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.dereg);
+	pkt.msg.data.dereg.ret_code = rc;
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_client_noop - debug no-op used to measure msgq roundtrip time
+ * @mmrm_vm: driver private data
+ * @req: request parameters (only seq_no is used)
+ *
+ * Echoes a response without calling any mmrm API.
+ * NOTE(review): the payload size is computed from data.lptest but the
+ * return code is written through data.dereg — presumably both are members
+ * of the same union in the response packet; confirm the layouts match.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+static int mmrm_vm_be_client_noop(struct mmrm_vm_driver_data *mmrm_vm,
+		struct mmrm_vm_api_request_msg *req)
+{
+	int rc = 0;
+	struct mmrm_vm_response_msg_pkt pkt;
+
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_NOOP;
+	pkt.msg.hd.seq_no = req->hd.seq_no;
+
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.lptest);
+	pkt.msg.data.dereg.ret_code = rc;
+
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_recv - dispatch an FE request to the matching mmrm API wrapper
+ * @mmrm_vm: driver private data
+ * @data: request message buffer pointer
+ * @size: request message size
+ *
+ * Return: result of the dispatched handler, -1 for an unknown command id.
+ */
+int mmrm_vm_be_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size)
+{
+	struct mmrm_vm_api_request_msg *cmd = data;
+	int rc = -1;
+
+	switch (cmd->hd.cmd_id) {
+	case MMRM_VM_REQUEST_REGISTER:
+		rc = mmrm_vm_be_client_register(mmrm_vm, cmd);
+		break;
+
+	case MMRM_VM_REQUEST_SETVALUE:
+		rc = mmrm_vm_be_client_setvalue(mmrm_vm, cmd);
+		break;
+
+	case MMRM_VM_REQUEST_SETVALUE_INRANGE:
+		rc = mmrm_vm_be_client_setvalue_inrange(mmrm_vm, cmd);
+		break;
+
+	case MMRM_VM_REQUEST_GETVALUE:
+		rc = mmrm_vm_be_client_getvalue(mmrm_vm, cmd);
+		break;
+
+	case MMRM_VM_REQUEST_DEREGISTER:
+		rc = mmrm_vm_be_client_deregister(mmrm_vm, cmd);
+		break;
+	case MMRM_VM_REQUEST_NOOP:
+		rc = mmrm_vm_be_client_noop(mmrm_vm, cmd);
+		break;
+	default:
+		/* use the module's level-gated logger like the rest of this
+		 * file, instead of a raw pr_err */
+		d_mpr_e("%s: cmd_id:%d unknown!!!\n", __func__, cmd->hd.cmd_id);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_wrong_request - reply to a malformed request from the SVM
+ * @mmrm_vm: driver private data
+ *
+ * Sends an MMRM_VM_RESPONSE_INVALID_PKT response so the FE does not wait
+ * forever on a request whose header failed validation.
+ *
+ * Return: 0 on success, error code from sending the response otherwise.
+ */
+int mmrm_vm_be_wrong_request(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int rc = 0;
+	struct mmrm_vm_response_msg_pkt pkt;
+
+	/* seq_no 0: the original request header could not be trusted */
+	pkt.msg.hd.cmd_id = MMRM_VM_RESPONSE_INVALID_PKT;
+	pkt.msg.hd.seq_no = 0;
+
+	pkt.hdr.size = sizeof(pkt.msg.hd) + sizeof(pkt.msg.data.dereg);
+	pkt.msg.data.dereg.ret_code = rc;
+
+	d_mpr_e("%s: wrong request\n", __func__);
+	rc = mmrm_vm_be_send_response(mmrm_vm, &pkt);
+	if (rc != 0)
+		d_mpr_e("%s: rc:%d\n", __func__, rc);
+	return rc;
+}

+ 112 - 0
qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_main.c

@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "mmrm_vm_be.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_debug.h"
+
+#define MMRM_CLK_CLIENTS_NUM_MAX 35
+
+struct mmrm_vm_driver_data *drv_vm_be = (void *) -EPROBE_DEFER;
+
+int msm_mmrm_debug = MMRM_ERR | MMRM_WARN | MMRM_PRINTK;
+
+int mmrm_client_get_clk_count(void);
+
+/*
+ * mmrm_vm_be_driver_probe - probe the MMRM VM back-end device.
+ * @pdev: platform device being bound.
+ *
+ * Allocates the driver data, sizes the client table from the core driver's
+ * clock count, sets up debugfs (best effort) and the gunyah msgq.
+ *
+ * The global drv_vm_be starts as the -EPROBE_DEFER sentinel; the original
+ * code left it pointing at (devm-managed) memory or NULL on failure.  All
+ * error paths now restore the sentinel so other code that tests drv_vm_be
+ * never dereferences stale/freed memory.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int mmrm_vm_be_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int   sz, clk_count, rc;
+
+	drv_vm_be = devm_kzalloc(dev, sizeof(*drv_vm_be), GFP_KERNEL);
+	if (!drv_vm_be) {
+		rc = -ENOMEM;
+		goto err_exit;
+	}
+
+	clk_count = mmrm_client_get_clk_count();
+	if (clk_count <= 0 || clk_count > MMRM_CLK_CLIENTS_NUM_MAX) {
+		d_mpr_e("%s: clk count is not correct\n", __func__);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+	sz = sizeof(struct mmrm_client *) * clk_count;
+	drv_vm_be->clk_client_tbl = devm_kzalloc(dev, sz, GFP_KERNEL);
+	if (!drv_vm_be->clk_client_tbl) {
+		dev_err(dev, "msgq register alloc memory failed");
+		rc = -ENOMEM;
+		goto err_exit;
+	}
+
+	/* debugfs is best-effort: probe continues without it */
+	drv_vm_be->debugfs_root = msm_mmrm_debugfs_init();
+	if (!drv_vm_be->debugfs_root)
+		d_mpr_e("%s: failed to create debugfs for mmrm\n", __func__);
+
+	dev_set_drvdata(&pdev->dev, drv_vm_be);
+	rc = mmrm_vm_msgq_init(drv_vm_be);
+	if (rc != 0) {
+		d_mpr_e("%s: failed to init msgq\n", __func__);
+		rc = -EINVAL;
+		goto err_undo;
+	}
+
+	drv_vm_be->dev = dev;
+	dev_err(dev, "msgq probe success");
+	return 0;
+
+err_undo:
+	dev_set_drvdata(&pdev->dev, NULL);
+	msm_mmrm_debugfs_deinit(drv_vm_be->debugfs_root);
+err_exit:
+	/* restore the deferral sentinel; devm frees any allocations */
+	drv_vm_be = (void *) -EPROBE_DEFER;
+	return rc;
+}
+
+/*
+ * mmrm_vm_be_driver_remove - unbind; tear down msgq and debugfs, then
+ * restore the -EPROBE_DEFER sentinel in the global drv_vm_be.
+ */
+static int mmrm_vm_be_driver_remove(struct platform_device *pdev)
+{
+	mmrm_vm_msgq_deinit(drv_vm_be);
+	msm_mmrm_debugfs_deinit(drv_vm_be->debugfs_root);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	drv_vm_be = (void *) -EPROBE_DEFER;
+	return 0;
+}
+
+static const struct of_device_id mmrm_vm_be_match[] = {
+	{ .compatible = "qcom,mmrm-vm-be" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mmrm_vm_be_match);
+
+static struct platform_driver mmrm_vm_be_driver = {
+	.probe = mmrm_vm_be_driver_probe,
+	.driver = {
+		.name = "mmrm-vm-be",
+		.of_match_table = mmrm_vm_be_match,
+	},
+	.remove = mmrm_vm_be_driver_remove,
+};
+
+/* module entry: registered early via subsys_initcall so the msgq back-end
+ * is available before its FE clients probe */
+static int __init mmrm_vm_be_module_init(void)
+{
+	pr_info("%s:  init start\n", __func__);
+
+	return platform_driver_register(&mmrm_vm_be_driver);
+}
+subsys_initcall(mmrm_vm_be_module_init);
+
+/* module exit: unregister the platform driver */
+static void __exit mmrm_vm_be_module_exit(void)
+{
+	platform_driver_unregister(&mmrm_vm_be_driver);
+}
+module_exit(mmrm_vm_be_module_exit);
+
+MODULE_SOFTDEP("pre: gunyah_transport");
+MODULE_SOFTDEP("pre: msm-mmrm");
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MMRM BE Driver");
+MODULE_LICENSE("GPL v2");

+ 317 - 0
qcom/opensource/mmrm-driver/vm/be/src/mmrm_vm_be_msgq.c

@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/gunyah/gh_rm_drv.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+
+#include "mmrm_vm_be.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_debug.h"
+
+#define MAX_ERR_COUNT 5
+
+int mmrm_vm_be_wrong_request(struct mmrm_vm_driver_data *mmrm_vm);
+
+/* Validate the transport header of a received packet.
+ * Return 0 for a supported MMRM data packet, -1 otherwise.
+ */
+static int is_valid_mmrm_message(struct mmrm_vm_request_msg_pkt *pkt)
+{
+	const struct mmrm_vm_msg_hdr *hdr = &pkt->hdr;
+
+	if (hdr->version != MMRM_VM_VER_1)
+		return -1;
+	if (hdr->type != MMRM_VM_TYPE_DATA)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * mmrm_vm_msgq_msg_handler - workqueue handler that drains queued FE requests
+ * @work: embedded in mmrm_vm_thread_info; used to recover the driver data
+ *
+ * Detaches the whole pending list under list_lock in one step (so the
+ * listener can keep appending without contention), then dispatches each
+ * message: valid packets go to mmrm_vm_be_recv(), invalid ones get an
+ * INVALID_PKT response. Each message buffer is freed after handling.
+ */
+static void mmrm_vm_msgq_msg_handler(struct work_struct *work)
+{
+	struct mmrm_vm_thread_info *pthread_info =
+		container_of(work, struct mmrm_vm_thread_info, msgq_work.work);
+	struct mmrm_vm_driver_data *mmrm_vm =
+		container_of(pthread_info, struct mmrm_vm_driver_data, thread_info);
+	struct mmrm_vm_msg *msg;
+	struct mmrm_vm_msg *next_msg;
+	struct list_head   head;
+	struct mmrm_vm_request_msg_pkt *pkt;
+
+	if (IS_ERR_OR_NULL(work))
+		return;
+
+	/* take the entire queued list in one locked operation */
+	mutex_lock(&pthread_info->list_lock);
+	list_replace_init(&pthread_info->queued_msg, &head);
+	mutex_unlock(&pthread_info->list_lock);
+
+	list_for_each_entry_safe(msg, next_msg, &head, link) {
+		pkt = (struct mmrm_vm_request_msg_pkt *)msg->msg_buf;
+		if (is_valid_mmrm_message(pkt) == 0)
+			mmrm_vm_be_recv(mmrm_vm, &pkt->msg, pkt->hdr.size);
+		else {
+			mmrm_vm_be_wrong_request(mmrm_vm);
+			d_mpr_e("%s: wrong mmrm message\n", __func__);
+		}
+		list_del(&msg->link);
+		kfree(msg);
+	}
+}
+
+/**
+ * mmrm_vm_be_msgq_listener - kthread that receives gunyah messages
+ * @data: the mmrm_vm_driver_data pointer passed to kthread_run()
+ *
+ * Receives packets from the gunyah msgq, queues them on the thread-info
+ * list and kicks the workqueue handler. Gives up after MAX_ERR_COUNT
+ * consecutive receive errors.
+ *
+ * The loop now checks kthread_should_stop(): the original looped on
+ * `true`, so the kthread_stop() issued by mmrm_vm_msgq_deinit() could
+ * block forever. NOTE(review): gh_msgq_recv() may still block until one
+ * more message (or error) arrives before the stop flag is re-checked —
+ * confirm deinit unblocks the receive (e.g. via gh_msgq_unregister).
+ *
+ * Return: 0 on normal stop, negative errno on allocation/receive failure.
+ */
+static int mmrm_vm_be_msgq_listener(void *data)
+{
+	struct mmrm_vm_driver_data *mmrm_vm;
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+	struct mmrm_vm_thread_info *thread_info;
+
+	struct mmrm_vm_msg *msg;
+	size_t size;
+	int ret = 0;
+	int err_count = 0;
+
+	if (IS_ERR_OR_NULL(data))
+		return -EINVAL;
+
+	mmrm_vm = (struct mmrm_vm_driver_data *)data;
+	pmsg_info = &mmrm_vm->msg_info;
+	thread_info = &mmrm_vm->thread_info;
+
+	while (!kthread_should_stop()) {
+		msg = kzalloc(sizeof(struct mmrm_vm_msg), GFP_KERNEL);
+		if (!msg)
+			return -ENOMEM;
+
+		ret = gh_msgq_recv(pmsg_info->msgq_handle, msg->msg_buf,
+				GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
+		if (ret < 0) {
+			kfree(msg);
+			d_mpr_e("gh_msgq_recv failed, rc=%d\n", ret);
+			err_count++;
+			/* tolerate transient errors, bail out if persistent */
+			if (err_count < MAX_ERR_COUNT)
+				continue;
+
+			return -EINVAL;
+		}
+
+		err_count = 0;
+		msg->msg_size = size;
+
+		mutex_lock(&thread_info->list_lock);
+		list_add_tail(&msg->link, &thread_info->queued_msg);
+		mutex_unlock(&thread_info->list_lock);
+
+		/* hand the message off to the workqueue handler immediately */
+		queue_delayed_work(thread_info->msg_workq,
+				 &thread_info->msgq_work, msecs_to_jiffies(0));
+	}
+
+	return 0;
+}
+
+/**
+ * mmrm_vm_msgq_send - send a response message via the gunyah msgq
+ * @mmrm_vm: driver data
+ * @msg: message buffer pointer
+ * @msg_size: message size
+ *
+ * Return: 0 on success; -EINVAL for a missing msgq handle, -E2BIG for an
+ * oversized message, or the gh_msgq_send() error.
+ */
+int mmrm_vm_msgq_send(struct mmrm_vm_driver_data *mmrm_vm, void *msg, size_t msg_size)
+{
+	if (!mmrm_vm->msg_info.msgq_handle) {
+		d_mpr_e("Failed to send msg, invalid msgq handle\n");
+		return -EINVAL;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		/* %zu: msg_size is size_t (the old %ld was a format mismatch) */
+		d_mpr_e("msg size unsupported for msgq: %zu > %d\n", msg_size,
+				GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		return -E2BIG;
+	}
+
+	return gh_msgq_send(mmrm_vm->msg_info.msgq_handle, msg, msg_size, GH_MSGQ_TX_PUSH);
+}
+
+/**
+ * mmrm_vm_be_gh_validate_register - validate a VM-status notification and
+ *                                   register the gunyah message queue
+ * @msg_info: gunyah message info
+ * @vm_status_payload: gunyah notification status payload
+ *
+ * Return: 0 only when the msgq was newly and successfully registered;
+ * -1 in every other case (not ready, wrong VM, already registered,
+ * registration failure). The caller starts the listener thread only on 0.
+ */
+int mmrm_vm_be_gh_validate_register(struct mmrm_vm_gh_msgq_info *msg_info,
+		struct gh_rm_notif_vm_status_payload *vm_status_payload)
+{
+	gh_vmid_t peer_vmid;
+	gh_vmid_t self_vmid;
+	int rc = -1;
+
+	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
+		return rc;
+
+	if (gh_rm_get_vmid(msg_info->peer_id, &peer_vmid))
+		return rc;
+
+	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+		return rc;
+
+	/* Notification for some other VM: nothing was registered, so report
+	 * failure. (Previously this returned NOTIFY_DONE == 0, which made the
+	 * caller believe registration succeeded and start the listener with a
+	 * NULL msgq handle.)
+	 */
+	if (peer_vmid != vm_status_payload->vmid)
+		return rc;
+
+	d_mpr_l("%s: vmid=%d peer_vmid=%d\n", __func__, vm_status_payload->vmid, peer_vmid);
+
+	/* already registered: do not register (or start a listener) twice */
+	if (msg_info->msgq_handle)
+		return rc;
+
+	msg_info->msgq_handle = gh_msgq_register(msg_info->msgq_label);
+
+	rc = 0;
+
+	if (IS_ERR_OR_NULL(msg_info->msgq_handle)) {
+		rc = -1;
+		d_mpr_e("%s: gunyah message queue registration failed :%ld\n", __func__,
+			PTR_ERR(msg_info->msgq_handle));
+	}
+
+	return rc;
+}
+
+/**
+ * mmrm_vm_be_msgq_cb - gunyah resource-manager notifier callback
+ * @nb: notifier block embedded in mmrm_vm_gh_msgq_info
+ * @cmd: notification type
+ * @data: notification payload (VM status)
+ *
+ * On a GH_RM_NOTIF_VM_STATUS notification for the trusted peer VM,
+ * registers the msgq and starts the listener thread.
+ *
+ * Return: always NOTIFY_DONE (errors are handled locally).
+ */
+static int mmrm_vm_be_msgq_cb(struct notifier_block *nb, unsigned long cmd, void *data)
+{
+	struct gh_rm_notif_vm_status_payload *vm_status_payload;
+	struct mmrm_vm_driver_data *mmrm_vm;
+	struct mmrm_vm_gh_msgq_info *msg_info;
+	struct mmrm_vm_thread_info *thread_info;
+	int rc;
+
+	if (IS_ERR_OR_NULL(nb))
+		return -EINVAL;
+
+	msg_info = container_of(nb, struct mmrm_vm_gh_msgq_info, pvt_nb);
+	mmrm_vm = container_of(msg_info, struct mmrm_vm_driver_data, msg_info);
+
+	thread_info = &mmrm_vm->thread_info;
+	if (cmd != GH_RM_NOTIF_VM_STATUS)
+		return NOTIFY_DONE;
+
+	/*
+	 * check VM status, only GH_TRUSTED_VM notification activate
+	 * GUNYAH message queue registering
+	 */
+	vm_status_payload = (struct gh_rm_notif_vm_status_payload *)data;
+	rc = mmrm_vm_be_gh_validate_register(msg_info, vm_status_payload);
+	if (rc != 0)
+		return NOTIFY_DONE;
+
+	d_mpr_e("%s: msgq registration successful\n", __func__);
+
+	thread_info->msgq_listener_thread = kthread_run(mmrm_vm_be_msgq_listener,
+			(void *)mmrm_vm, "mmrm_msgq_listener");
+	if (IS_ERR_OR_NULL(thread_info->msgq_listener_thread)) {
+		/* don't keep an ERR_PTR: deinit only NULL-checks this field
+		 * before calling kthread_stop() */
+		thread_info->msgq_listener_thread = NULL;
+		return NOTIFY_DONE;
+	}
+
+	/* kthread_run() already wakes the thread; the old extra
+	 * wake_up_process() call was redundant and has been removed */
+	return NOTIFY_DONE;
+}
+
+/**
+ * mmrm_vm_msgq_init - gunyah message queue initialization
+ * @mmrm_vm: driver data
+ *
+ * Registers the gunyah RM notifier (actual msgq registration happens later
+ * in the notifier callback when the peer VM reports ready) and sets up the
+ * message list, its lock and the handler workqueue.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int mmrm_vm_msgq_init(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	struct mmrm_vm_gh_msgq_info *msg_info;
+	struct mmrm_vm_thread_info *thread_info;
+	int rc = -1;
+
+	if (IS_ERR_OR_NULL(mmrm_vm)) {
+		rc = -EINVAL;
+		d_mpr_e("%s:  driver init wrong\n", __func__);
+		goto err;
+	}
+	msg_info = &mmrm_vm->msg_info;
+	thread_info = &mmrm_vm->thread_info;
+
+	msg_info->msgq_label = GH_MSGQ_LABEL_MMRM;
+	d_mpr_l("%s:  msgq-label=%d\n", __func__, msg_info->msgq_label);
+
+	/* msgq registration is deferred to the notifier callback */
+	msg_info->peer_id = GH_TRUSTED_VM;
+	msg_info->pvt_nb.notifier_call = mmrm_vm_be_msgq_cb;
+	rc = gh_rm_register_notifier(&msg_info->pvt_nb);
+	if (rc != 0) {
+		d_mpr_e("%s:  gunyah register notifier failed\n", __func__);
+		goto err;
+	}
+	/* remember that the notifier is live so deinit can undo it */
+	msg_info->status |= MMRM_VM_MSG_STATUS_NOTIFIER;
+	mutex_init(&thread_info->list_lock);
+	INIT_LIST_HEAD(&thread_info->queued_msg);
+	thread_info->msg_workq = create_singlethread_workqueue("vm_message_workq");
+	if (IS_ERR_OR_NULL(thread_info->msg_workq)) {
+		d_mpr_e("%s:  create workqueue thread failed\n", __func__);
+		goto err_workqueue;
+	};
+	INIT_DELAYED_WORK(&thread_info->msgq_work, mmrm_vm_msgq_msg_handler);
+
+	return 0;
+
+err_workqueue:
+	gh_rm_unregister_notifier(&msg_info->pvt_nb);
+	msg_info->status &= ~MMRM_VM_MSG_STATUS_NOTIFIER;
+
+err:
+	return rc;
+}
+
+/**
+ * mmrm_vm_msgq_deinit - gunyah message queue de-initialization
+ * @mmrm_vm: driver data
+ *
+ * Stops the listener thread, unregisters the notifier and the msgq, and
+ * destroys the handler workqueue, in that order.
+ * NOTE(review): kthread_stop() may block if the listener is parked inside
+ * a blocking gh_msgq_recv() — confirm unregistering the msgq unblocks it.
+ *
+ * Return: 0 on success, or the gh_msgq_unregister() error.
+ */
+int mmrm_vm_msgq_deinit(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	struct mmrm_vm_gh_msgq_info *msg_info;
+	struct mmrm_vm_thread_info *thread_info;
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	msg_info = &mmrm_vm->msg_info;
+	thread_info = &mmrm_vm->thread_info;
+	if (thread_info->msgq_listener_thread) {
+		kthread_stop(thread_info->msgq_listener_thread);
+		thread_info->msgq_listener_thread = NULL;
+	}
+
+	if (msg_info->status & MMRM_VM_MSG_STATUS_NOTIFIER)
+		gh_rm_unregister_notifier(&msg_info->pvt_nb);
+
+	if (msg_info->msgq_handle) {
+		rc = gh_msgq_unregister(msg_info->msgq_handle);
+		if (rc != 0)
+			d_mpr_e("%s: msgq gunyah unregistration failed: err:%d\n", __func__, rc);
+		msg_info->msgq_handle = NULL;
+	}
+
+	if (thread_info->msg_workq) {
+		destroy_workqueue(thread_info->msg_workq);
+		thread_info->msg_workq = NULL;
+	}
+	return rc;
+}

+ 63 - 0
qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_debug.h

@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MMRM_VM_DEBUG__
+#define __MMRM_VM_DEBUG__
+
+#include <linux/debugfs.h>
+#include <linux/printk.h>
+
+#ifndef MMRM_VM_DBG_LABEL
+#define MMRM_VM_DBG_LABEL "mmrm_vm"
+#endif
+
+#define MMRM_VM_DBG_TAG MMRM_VM_DBG_LABEL ": %4s: "
+
+/* To enable messages OR these values and
+ * echo the result to debugfs file.
+ */
+enum mmrm_msg_prio {
+	MMRM_VM_ERR = 0x000001,
+	MMRM_VM_HIGH = 0x000002,
+	MMRM_VM_LOW = 0x000004,
+	MMRM_VM_WARN = 0x000008,
+	MMRM_VM_PRINTK = 0x010000,
+};
+
+/* runtime log mask, defined by the module that includes this header */
+extern int mmrm_vm_debug;
+
+/* NOTE(review): a message prints only when BOTH its level bit AND
+ * MMRM_VM_PRINTK are set in mmrm_vm_debug — with PRINTK cleared nothing
+ * is emitted at all; confirm this double gate is intended.
+ */
+#define dprintk(__level, __fmt, ...) \
+	do { \
+		if (mmrm_vm_debug & __level) { \
+			if (mmrm_vm_debug & MMRM_VM_PRINTK) { \
+				pr_info(MMRM_VM_DBG_TAG __fmt, \
+					get_debug_level_str(__level), \
+					##__VA_ARGS__); \
+			} \
+		} \
+	} while (0)
+
+/* convenience wrappers per severity level */
+#define d_mpr_e(__fmt, ...) dprintk(MMRM_VM_ERR, __fmt, ##__VA_ARGS__)
+#define d_mpr_h(__fmt, ...) dprintk(MMRM_VM_HIGH, __fmt, ##__VA_ARGS__)
+#define d_mpr_l(__fmt, ...) dprintk(MMRM_VM_LOW, __fmt, ##__VA_ARGS__)
+#define d_mpr_w(__fmt, ...) dprintk(MMRM_VM_WARN, __fmt, ##__VA_ARGS__)
+
+/* Map a single mmrm_msg_prio bit to a fixed-width (4-char) label used in
+ * the log tag; unknown/combined levels map to "????".
+ */
+static inline char *get_debug_level_str(int level)
+{
+	switch (level) {
+	case MMRM_VM_ERR:
+		return "err ";
+	case MMRM_VM_HIGH:
+		return "high";
+	case MMRM_VM_LOW:
+		return "low ";
+	case MMRM_VM_WARN:
+		return "warn";
+	default:
+		return "????";
+	}
+}
+
+#endif /* __MMRM_VM_DEBUG__ */

+ 246 - 0
qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_interface.h

@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MMRM_VM_INTERNAL_H__
+#define __MMRM_VM_INTERNAL_H__
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+#include <mmrm_vm_msgq.h>
+
+/**
+ * mmrm_vm_thread_info - message listener & workqueue info
+ * @msgq_listener_thread: handle to msgq listener thread that is used
+ *                        to receive/send messages through gunyah interface
+ * @msg_workq: message workqueue pointer
+ * @msgq_work: message work, worker thread to process the messages
+ * @queued_msg: message queue head
+ */
+struct mmrm_vm_thread_info {
+	struct task_struct *msgq_listener_thread;
+	struct workqueue_struct   *msg_workq;
+	struct delayed_work msgq_work;
+	struct mutex list_lock;
+	struct list_head   queued_msg;
+};
+
+/**
+ * struct mmrm_vm_driver_data -- device driver private data
+ * @dev: device pointer
+ * @msg_info: gunyah message info
+ * @thread_info: message listener & workqueue info
+ * @clk_client_tbl: index and client handler LUT
+ * @debugfs_root: debug fs, /sys/kernel/debug
+ * @vm_pvt_data: pointer to fe/be specific data
+ */
+struct mmrm_vm_driver_data {
+	struct device *dev;
+	struct mmrm_vm_gh_msgq_info msg_info;
+	struct mmrm_vm_thread_info thread_info;
+	struct mmrm_client **clk_client_tbl;
+
+	/* debugfs */
+	struct dentry *debugfs_root;
+	void *vm_pvt_data;
+};
+
+/**
+ * enum mmrm_vm_api_msg_id -- request/response cmd ID
+ */
+enum mmrm_vm_api_msg_id {
+	MMRM_VM_REQUEST_REGISTER = 1,
+	MMRM_VM_REQUEST_SETVALUE,
+	MMRM_VM_REQUEST_SETVALUE_INRANGE,
+	MMRM_VM_REQUEST_GETVALUE,
+	MMRM_VM_REQUEST_DEREGISTER,
+	MMRM_VM_REQUEST_NOOP, // this is for debug purpose,calculating msgq roundtrip time
+
+	MMRM_VM_RESPONSE_REGISTER = MMRM_VM_REQUEST_REGISTER | 0x800,
+	MMRM_VM_RESPONSE_SETVALUE,
+	MMRM_VM_RESPONSE_SETVALUE_INRANGE,
+	MMRM_VM_RESPONSE_GETVALUE,
+	MMRM_VM_RESPONSE_DEREGISTER,
+	MMRM_VM_RESPONSE_NOOP, // this is for debug purpose,calculating msgq roundtrip time
+	MMRM_VM_RESPONSE_INVALID_PKT,
+};
+
+/**
+ * struct msg_head -- message head
+ * @cmd_id: mmrm API message cmd id
+ * @seq_no: message sequence id
+ */
+struct mmrm_vm_api_msg_head {
+	enum mmrm_vm_api_msg_id cmd_id;
+	int  seq_no;
+};
+
+/**
+ * struct mmrm_vm_register_request -- mmrm register parameters
+ * @client_type: client type, definition see msm_mmrm.h
+ * @priority: client priority, definition see msm_mmrm.h
+ * @desc: client description, definition see msm_mmrm.h
+ */
+struct mmrm_vm_register_request {
+	enum mmrm_client_type client_type;
+	enum mmrm_client_priority priority;
+	struct mmrm_clk_client_desc desc;
+};
+
+/**
+ * struct mmrm_vm_deregister_request -- mmrm deregister parameters
+ * @client_id: id of the registered client to be deregistered
+ */
+struct mmrm_vm_deregister_request {
+	u32 client_id;
+};
+
+/**
+ * struct mmrm_vm_noop_request -- noop request parameters
+ * @client_id: 32-bit value transferred for round-trip measurement
+ */
+struct mmrm_vm_noop_request {
+	u32 client_id;
+};
+
+/**
+ * struct setvalue_request -- mmrm setvalue parameters
+ * @client: client type, definition see msm_mmrm.h
+ * @data: client info, definition see msm_mmrm.h
+ * @val: new clock rate value
+ */
+struct mmrm_vm_setvalue_request {
+	u32 client_id;
+	struct mmrm_client_data data;
+	unsigned long val;
+};
+
+/**
+ * struct mmrm_vm_setvalue_inrange_request -- mmrm setvalue_inrange parameters
+ * @client: client type, definition see msm_mmrm.h
+ * @data: client info, definition see msm_mmrm.h
+ * @val: new clock rate value range, definition see msm_mmrm.h
+ */
+struct mmrm_vm_setvalue_inrange_request {
+	u32 client_id;
+	struct mmrm_client_data data;
+	struct mmrm_client_res_value val;
+};
+
+/**
+ * struct mmrm_vm_getvalue_request -- mmrm getvalue parameters
+ * @client: client type, definition see msm_mmrm.h
+ * @val: current clock rate value range, definition see msm_mmrm.h
+ */
+struct mmrm_vm_getvalue_request {
+	u32 client_id;
+};
+
+/**
+ * struct mmrm_vm_api_request_msg -- mmrm request API message unified data definition
+ * @hd: mmrm API request message head
+ * @data: parameters mmrm API needs per API message cmd id
+ */
+struct mmrm_vm_api_request_msg {
+	struct mmrm_vm_api_msg_head hd;
+	union {
+		struct mmrm_vm_register_request reg;
+		struct mmrm_vm_deregister_request dereg;
+		struct mmrm_vm_setvalue_request setval;
+		struct mmrm_vm_setvalue_inrange_request setval_range;
+		struct mmrm_vm_getvalue_request getval;
+		struct mmrm_vm_noop_request lptest;
+	} data;
+};
+
+/**
+ * struct mmrm_vm_register_response -- mmrm_client_register API response message
+ * @client: handle for registered client
+ */
+struct mmrm_vm_register_response {
+	u32 client_id;
+};
+
+/**
+ * struct mmrm_vm_deregister_response -- mmrm_client_deregister API response message
+ * @ret_code: indicates if the mmrm_client_deregister is successful
+ */
+struct mmrm_vm_deregister_response {
+	int ret_code;
+};
+
+/**
+ * struct mmrm_vm_noop_response -- noop request's response message
+ * @ret_code: returned integer
+ */
+struct mmrm_vm_noop_response {
+	int ret_code;
+};
+
+/**
+ * struct mmrm_vm_setvalue_response -- mmrm_client_set_value API response message
+ * @val: value that mmrm_client_set_value return
+ */
+struct mmrm_vm_setvalue_response {
+	unsigned long val;
+};
+
+/**
+ * struct mmrm_vm_setvalue_inrange_response -- mmrm_client_set_value_in_range API response message
+ * @ret_code: value that mmrm_client_set_value_in_range return
+ */
+struct mmrm_vm_setvalue_inrange_response {
+	int ret_code;
+};
+
+/**
+ * struct mmrm_vm_getvalue_response -- mmrm_client_get_value API response message
+ * @val: value that mmrm_client_get_value return
+ */
+struct mmrm_vm_getvalue_response {
+	struct mmrm_client_res_value val;
+};
+
+/**
+ * struct mmrm_vm_api_response_msg -- mmrm response message unified data
+ * @hd: mmrm API response message head
+ * @data: data that mmrm API return per API response message id
+ */
+struct mmrm_vm_api_response_msg {
+	struct mmrm_vm_api_msg_head hd;
+	union {
+		struct mmrm_vm_register_response reg;
+		struct mmrm_vm_deregister_response dereg;
+		struct mmrm_vm_setvalue_response setval;
+		struct mmrm_vm_setvalue_inrange_response setval_range;
+		struct mmrm_vm_getvalue_response getval;
+		struct mmrm_vm_noop_response lptest;
+	} data;
+};
+
+/**
+ * struct mmrm_vm_request_msg_pkt -- mmrm request packet that is sent through gunyah API
+ * @hdr: message head for checking message valid
+ * @msg: data that is needed by mmrm API
+ */
+struct mmrm_vm_request_msg_pkt {
+	struct mmrm_vm_msg_hdr hdr;
+	struct mmrm_vm_api_request_msg msg;
+	u64 start_time_ns;
+};
+
+/**
+ * struct mmrm_vm_response_msg_pkt -- mmrm response packet that is sent through gunyah API
+ * @hdr: message head for checking message valid
+ * @msg: data that is returned by mmrm API
+ */
+struct mmrm_vm_response_msg_pkt {
+	struct mmrm_vm_msg_hdr hdr;
+	struct mmrm_vm_api_response_msg msg;
+};
+
+#endif /* __MMRM_VM_INTERNAL_H__ */
+
+

+ 104 - 0
qcom/opensource/mmrm-driver/vm/common/inc/mmrm_vm_msgq.h

@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MMRM_VM_MSGQ_H__
+#define __MMRM_VM_MSGQ_H__
+
+#include <linux/gunyah/gh_msgq.h>
+
+#define MMRM_VM_VER_1 1       // mmrm version, for message valid check
+
+#define MMRM_VM_MAX_PKT_SZ  1024      // mmrm max gunyah packet size
+
+#define MMRM_VM_MSG_STATUS_NOTIFIER   0x01
+
+/**
+ * mmrm_vm_pkt_type: mmrm transfer type, for message valid check
+ * @MMRM_VM_TYPE_DATA: request/response data
+ */
+enum mmrm_vm_pkt_type {
+	MMRM_VM_TYPE_DATA = 1,
+};
+
+struct mmrm_vm_driver_data;
+
+/**
+ * struct mmrm_vm_msg_hdr - mmrm vm packet header
+ * @version: protocol version
+ * @type: packet type; one of MMRM_VM_TYPE_* in mmrm_vm_pkt_type
+ * @flags: reserved for future use
+ * @resv: reserved padding byte
+ * @size: length of packet, excluding this header
+ */
+struct mmrm_vm_msg_hdr {
+	u8 version;
+	u8 type;
+	u8 flags;
+	u8 resv;
+	u32 size;
+};
+
+/**
+ * mmrm_vm_msg - message that be received.
+ * @link - list head
+ * @msg_size - message size
+ * @msg_buf - message buffer
+ */
+struct mmrm_vm_msg {
+	struct list_head link;
+	size_t msg_size;
+	u8 msg_buf[GH_MSGQ_MAX_MSG_SIZE_BYTES];
+};
+
+/**
+ * mmrm_vm_msgq_info - gunyah info.
+ * @peer_id: notification callback check if message is from SVM
+ * @msgq_handle - registered msg queue handle with gunyah api
+ * @msgq_label - message queue label
+ * @status: indicate init status
+ * @pvt_nb - notifier info
+ */
+struct mmrm_vm_gh_msgq_info {
+	int  peer_id;
+	void *msgq_handle;
+	int  msgq_label;
+	int status;
+	struct notifier_block pvt_nb;
+};
+
+/**
+ * struct mmrm_vm_msg_q -- svm mmrm API caller queue that wait for mmrm API return
+ * @link: list head
+ * @m_req: request message pointer
+ * @m_resp: response message buffer pointer
+ * @complete: sync mmrm API response and caller
+ */
+struct mmrm_vm_msg_q {
+	struct list_head link;
+	struct mmrm_vm_request_msg_pkt *m_req;
+	struct mmrm_vm_response_msg_pkt *m_resp;
+	struct completion complete;
+};
+
+/**
+ * mmrm_vm_msgq_init - initialize the mmrm message queue: both TX and RX
+ * @mmrm_vm - handle to mmrm_vm_data_priv
+ */
+int mmrm_vm_msgq_init(struct mmrm_vm_driver_data *mmrm_vm);
+
+/**
+ * mmrm_vm_msgq_deinit - deinitialize the mmrm message queue: both TX and RX
+ * @mmrm_vm - handle to mmrm_vm_data_priv
+ */
+int mmrm_vm_msgq_deinit(struct mmrm_vm_driver_data *mmrm_vm);
+
+/**
+ * mmrm_vm_msgq_send - send custom messages across VM's
+ * @mmrm_vm - handle to mmrm_vm_data_priv
+ * @msg - payload data
+ * @msg_size - size of the payload_data
+ */
+int mmrm_vm_msgq_send(struct mmrm_vm_driver_data *mmrm_vm, void *msg, size_t msg_size);
+#endif // __MMRM_VM_MSGQ_H__
+

+ 44 - 0
qcom/opensource/mmrm-driver/vm/common/src/mmrm_vm_debug.c

@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+
+#include "mmrm_vm_debug.h"
+
+int mmrm_vm_debug = MMRM_VM_ERR | MMRM_VM_WARN | MMRM_VM_PRINTK;
+
+/**
+ * msm_mmrm_debugfs_init - create the "mmrm_vm" debugfs directory and expose
+ * the debug_level bitmask beneath it.
+ *
+ * Return: the directory dentry on success, NULL on failure.
+ */
+struct dentry *msm_mmrm_debugfs_init(void)
+{
+	struct dentry *root;
+
+	/* directory lives under debugfs root (/sys/kernel/debug) */
+	root = debugfs_create_dir("mmrm_vm", NULL);
+	if (IS_ERR_OR_NULL(root)) {
+		d_mpr_e("%s: Call to debugfs_create_dir(%s) failed!\n", __func__, "mmrm");
+		d_mpr_e("%s: error\n", __func__);
+		return NULL;
+	}
+
+	/* runtime-tunable log mask; add further knobs here */
+	debugfs_create_u32("debug_level", 0644, root, &mmrm_vm_debug);
+
+	return root;
+}
+
+/**
+ * msm_mmrm_debugfs_deinit - remove the debugfs tree created by
+ * msm_mmrm_debugfs_init()
+ * @dir: directory dentry returned by msm_mmrm_debugfs_init()
+ */
+void msm_mmrm_debugfs_deinit(struct dentry *dir)
+{
+	debugfs_remove_recursive(dir);
+}
+

+ 15 - 0
qcom/opensource/mmrm-driver/vm/fe/Kbuild

@@ -0,0 +1,15 @@
+
+ifeq ($(CONFIG_MSM_MMRM_VM), y)
+obj-m += mmrm_vm_fe.o
+
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+ifeq ($(CONFIG_ARCH_QTI_VM), y)
+	mmrm_vm_fe-objs := \
+	src/mmrm_vm_fe_main.o \
+	src/mmrm_vm_fe_frontend.o \
+	src/mmrm_vm_fe_msgq.o \
+	src/mmrm_vm_fe_api.o \
+	../common/src/mmrm_vm_debug.o
+endif
+endif
+endif

+ 16 - 0
qcom/opensource/mmrm-driver/vm/fe/src/Makefile.am

@@ -0,0 +1,16 @@
+KBUILD_OPTIONS+= MMRM_ROOT=$(KERNEL_SRC)/$(M)
+
+all: modules
+
+modules:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 121 - 0
qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe.h

@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MMRM_VM_FE_H__
+#define __MMRM_VM_FE_H__
+
+#include <mmrm_vm_msgq.h>
+#include <mmrm_vm_interface.h>
+
+#define MMRM_SYSFS_ENTRY_MAX_LEN     PAGE_SIZE
+
+struct mmrm_vm_fe_clk_client_desc {
+	u32 client_domain;
+	u32 client_id;
+	u32 num_hw_blocks;
+};
+
+struct mmrm_vm_fe_clk_src_set {
+	struct mmrm_vm_fe_clk_client_desc *clk_src_tbl;
+	u32 count;
+};
+
+struct mmrm_vm_fe_msgq_rt_stats {
+	u64 register_total_us;
+	u64 looptest_total_us;
+	u32 count;
+};
+
+struct mmrm_vm_fe_priv {
+	struct device *dev;
+
+	struct mmrm_client *client_tbl;
+
+	struct list_head resp_works;
+	struct mutex resp_works_lock;
+
+	struct mmrm_vm_fe_clk_src_set clk_src_set;
+	struct mutex msg_send_lock;
+	int  seq_no;
+	bool is_clk_scaling_supported;
+
+	struct mmrm_vm_fe_msgq_rt_stats msgq_rt_stats;
+};
+
+struct mmrm_vm_fe_pkt {
+	struct mmrm_vm_msg_q msgq;
+	struct mmrm_vm_request_msg_pkt req_pkt;
+	struct mmrm_vm_response_msg_pkt resp_pkt;
+};
+
+/*
+ * mmrm_vm_fe_recv_cb -- FE message receiving thread call this function
+ *                       for transfer receiving packet to FE
+ * @mmrm_vm: specific device driver info
+ * @data: message pointer
+ * @size: message size
+ */
+void mmrm_vm_fe_recv_cb(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size);
+
+/*
+ * mmrm_vm_fe_request_send -- FE send mmrm request message
+ * @mmrm_vm: device data, includes message handle
+ * @msg_pkt: request message pointer
+ * @msg_size: message size
+ */
+int mmrm_vm_fe_request_send(struct mmrm_vm_driver_data *mmrm_vm,
+	struct mmrm_vm_request_msg_pkt *msg_pkt, size_t msg_size);
+
+/*
+ * get_client_id_2_handle -- get handle from client ID
+ * @client_id: client ID
+ */
+struct mmrm_client *mmrm_vm_fe_get_client(u32 client_id);
+
+/*
+ * load_clk_resource_info -- get clk resource info from DT
+ * @drv_priv: device data
+ */
+int mmrm_vm_fe_load_clk_rsrc(struct mmrm_vm_driver_data *drv_priv);
+
+/*
+ * mmrm_vm_fe_clk_src_check -- check if fe support the clk src
+ * @desc: clk src description
+ */
+struct mmrm_vm_fe_clk_client_desc *mmrm_vm_fe_clk_src_get(struct mmrm_client_desc *desc);
+
+/*
+ * mmrm_vm_fe_init_lookup_table -- init the clk client lookup table
+ * @mmrm_vm: device data
+ */
+int mmrm_vm_fe_init_lookup_table(struct mmrm_vm_driver_data *mmrm_vm);
+
+/*
+ * mmrm_vm_fe_clk_print_info -- output clk info through sys
+ * @clk_src_set: clk info
+ * @buf: received output buffer
+ * @max_len: buffer length
+ */
+int mmrm_vm_fe_clk_print_info(
+	struct mmrm_vm_fe_clk_src_set *clk_src_set,
+	char *buf, int max_len);
+
+/*
+ * mmrm_vm_fe_recv -- process received response info
+ * @mmrm_vm: device data
+ * @data: received response info buffer
+ * @size: message size
+ */
+void mmrm_vm_fe_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size);
+
+/*
+ * mmrm_vm_fe_count_clk_clients_frm_dt -- count clk client entries present
+ *                                        in the "mmrm-client-info" DT property
+ * @pdev: platform device
+ */
+int mmrm_vm_fe_count_clk_clients_frm_dt(struct platform_device *pdev);
+
+#endif /* __MMRM_VM_FE_H__ */
+
+

+ 320 - 0
qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_api.c

@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/gunyah/gh_rm_drv.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/ktime.h>
+
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_debug.h"
+
+#define get_client_handle_2_id(client) (client->client_uid)
+
+extern struct mmrm_vm_driver_data *drv_vm_fe;
+
+#define MAX_TIMEOUT_MS 300
+
+#define CHECK_SKIP_MMRM_CLK_RSRC(drv_data)	\
+{									\
+	if (!drv_data->is_clk_scaling_supported) {	\
+		d_mpr_h("%s: mmrm clk rsrc not supported\n", __func__);\
+		goto skip_mmrm;				\
+	}								\
+}
+
+/*
+ * mmrm_fe_append_work_list -- queue a request node, send the packet to the
+ * BE and wait for the matching response.
+ * @msg_q: caller's wait node; m_req is sent, m_resp is filled by
+ *         mmrm_vm_fe_recv() before complete() is signalled
+ * @msg_sz: request payload size (excluding the transport header)
+ *
+ * Return: 0 on success, -ETIMEDOUT when no response arrives in time.
+ */
+int mmrm_fe_append_work_list(struct mmrm_vm_msg_q *msg_q, int msg_sz)
+{
+	struct mmrm_vm_request_msg_pkt *msg_pkt = msg_q->m_req;
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+	struct mmrm_vm_msg_q *node, *temp;
+	unsigned long remaining;
+
+	init_completion(&msg_q->complete);
+	mutex_lock(&fe_data->resp_works_lock);
+	list_add_tail(&msg_q->link, &fe_data->resp_works);
+	mutex_unlock(&fe_data->resp_works_lock);
+
+	mutex_lock(&fe_data->msg_send_lock);
+	msg_pkt->msg.hd.seq_no = fe_data->seq_no++;
+	mutex_unlock(&fe_data->msg_send_lock);
+
+	d_mpr_w("%s: seq no:%d\n", __func__, msg_pkt->msg.hd.seq_no);
+
+	msg_pkt->start_time_ns = ktime_get_ns();
+
+	mmrm_vm_fe_request_send(drv_vm_fe, msg_pkt, msg_sz);
+
+	/* fix: wait_for_completion_timeout() returns 0 on timeout and the
+	 * remaining jiffies otherwise; the old ">= MAX_TIMEOUT_MS" check
+	 * compared jiffies with milliseconds and inverted the condition.
+	 */
+	remaining = wait_for_completion_timeout(&msg_q->complete,
+		msecs_to_jiffies(MAX_TIMEOUT_MS));
+	if (remaining == 0) {
+		/* fix: unlink the stale node so a late response cannot touch
+		 * the caller's soon-to-be-freed packet (use-after-free)
+		 */
+		mutex_lock(&fe_data->resp_works_lock);
+		list_for_each_entry_safe(node, temp, &fe_data->resp_works, link) {
+			if (node == msg_q) {
+				list_del(&node->link);
+				break;
+			}
+		}
+		mutex_unlock(&fe_data->resp_works_lock);
+		d_mpr_e("%s: request send timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* Allocate one bundled request/response packet and hand back its embedded
+ * wait-queue node; pair with release_msg_work().
+ */
+struct mmrm_vm_msg_q *get_msg_work(void)
+{
+	struct mmrm_vm_fe_pkt *pkt = kzalloc(sizeof(struct mmrm_vm_fe_pkt), GFP_KERNEL);
+
+	if (!pkt) {
+		d_mpr_e("%s: failed to alloc msg buffer\n", __func__);
+		return NULL;
+	}
+
+	pkt->msgq.m_req = &pkt->req_pkt;
+	pkt->msgq.m_resp = &pkt->resp_pkt;
+
+	return &pkt->msgq;
+}
+
+/* Free a node obtained from get_msg_work(); tolerates (and logs) NULL. */
+void release_msg_work(struct mmrm_vm_msg_q *msg_q)
+{
+	struct mmrm_vm_fe_pkt *data;
+
+	if (msg_q == NULL) {
+		d_mpr_e("%s: release null msg ptr\n", __func__);
+		return;
+	}
+	/* msg_q is embedded in the mmrm_vm_fe_pkt allocation; free the whole */
+	data = container_of(msg_q, struct mmrm_vm_fe_pkt, msgq);
+	kfree(data);
+}
+
+/**
+ * mmrm_client_register -- forward a client registration to the BE over msgq.
+ * @desc: client description (type, priority, clock info); see msm_mmrm.h
+ *
+ * Return: client handle on success, NULL on failure.
+ */
+struct mmrm_client *mmrm_client_register(struct mmrm_client_desc *desc)
+{
+	struct mmrm_vm_msg_q *msg_q;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_register_request *reg_data;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+	int rc = 0;
+	struct mmrm_client *client = NULL;
+
+	/* reject clk sources this FE does not know about */
+	if (mmrm_vm_fe_clk_src_get(desc) == NULL) {
+		d_mpr_e("%s: FE doesn't support clk domain=%d client id=%d\n", __func__,
+			desc->client_info.desc.client_domain, desc->client_info.desc.client_id);
+		goto err_clk_src;
+	}
+
+	msg_q = get_msg_work();
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		goto err_no_mem;
+	}
+	api_msg = &msg_q->m_req->msg;
+	reg_data = &api_msg->data.reg;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_REGISTER;
+	reg_data->client_type = desc->client_type;
+	reg_data->priority = desc->priority;
+	memcpy(&reg_data->desc, &desc->client_info.desc, sizeof(reg_data->desc));
+
+	/* fix: dropped the stray ";" that followed the if-block */
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+	if (rc == 0)
+		client = mmrm_vm_fe_get_client(msg_q->m_resp->msg.data.reg.client_id);
+
+	release_msg_work(msg_q);
+
+err_no_mem:
+err_clk_src:
+	return client;
+}
+EXPORT_SYMBOL(mmrm_client_register);
+
+/**
+ * mmrm_client_deregister -- ask the BE to drop a registered client.
+ * @client: handle returned by mmrm_client_register()
+ *
+ * Return: BE result code on success, negative value on transport failure.
+ */
+int mmrm_client_deregister(struct mmrm_client *client)
+{
+	struct mmrm_vm_msg_q *msg_q;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_deregister_request *req;
+	size_t msg_size;
+	int rc = -1;
+
+	msg_q = get_msg_work();
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		return rc;
+	}
+
+	api_msg = &msg_q->m_req->msg;
+	req = &api_msg->data.dereg;
+	msg_size = sizeof(api_msg->hd) + sizeof(*req);
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_DEREGISTER;
+	req->client_id = get_client_handle_2_id(client);
+
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+	if (rc == 0)
+		rc = msg_q->m_resp->msg.data.dereg.ret_code;
+
+	release_msg_work(msg_q);
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_deregister);
+
+/**
+ * mmrm_client_set_value -- request the BE to set a client's clock rate.
+ * @client: handle returned by mmrm_client_register()
+ * @client_data: per-request flags and hw block count; see msm_mmrm.h
+ * @val: new clock rate value
+ *
+ * Return: value reported by the BE on success, negative value on failure.
+ */
+int mmrm_client_set_value(struct mmrm_client *client,
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	int rc = -1;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_setvalue_request *reg_data;
+	struct mmrm_vm_msg_q *msg_q;
+
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	msg_q = get_msg_work();
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		goto err_no_mem;
+	}
+	api_msg = &msg_q->m_req->msg;
+	reg_data = &api_msg->data.setval;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE;
+	reg_data->client_id = get_client_handle_2_id(client);
+	reg_data->data.flags = client_data->flags;
+	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
+	reg_data->val = val;
+
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+	if (rc == 0) {
+		rc = msg_q->m_resp->msg.data.setval.val;
+		d_mpr_h("%s: done rc=%d\n", __func__, rc);
+	}
+
+	/* fix: the packet was leaked on every call (including the old early
+	 * return on timeout); always release it before returning
+	 */
+	release_msg_work(msg_q);
+
+err_no_mem:
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value);
+
+/**
+ * mmrm_client_set_value_in_range -- ask the BE to set a client's clock rate
+ * within the given range.
+ * @client: handle returned by mmrm_client_register()
+ * @client_data: per-request flags and hw block count; see msm_mmrm.h
+ * @val: requested cur/min/max values; see msm_mmrm.h
+ *
+ * Return: BE result code on success, negative value on transport failure.
+ */
+int mmrm_client_set_value_in_range(struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	int rc = -1;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_setvalue_inrange_request *reg_data;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+	struct mmrm_vm_msg_q *msg_q;
+
+	msg_q = get_msg_work();
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		goto err_no_mem;
+	}
+	api_msg = &msg_q->m_req->msg;
+	reg_data = &api_msg->data.setval_range;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_SETVALUE_INRANGE;
+	reg_data->client_id = get_client_handle_2_id(client);
+	reg_data->data.flags = client_data->flags;
+	reg_data->data.num_hw_blocks = client_data->num_hw_blocks;
+	reg_data->val.cur = val->cur;
+	reg_data->val.max = val->max;
+	reg_data->val.min = val->min;
+
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+	/* fix: propagate the BE result code (the response's ret_code was
+	 * filled in by mmrm_vm_fe_recv() but silently discarded)
+	 */
+	if (rc == 0)
+		rc = msg_q->m_resp->msg.data.setval_range.ret_code;
+
+	/* fix: packet was never freed (memory leak) */
+	release_msg_work(msg_q);
+
+err_no_mem:
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_set_value_in_range);
+
+
+/**
+ * mmrm_client_get_value -- query the BE for a client's current resource value.
+ * @client: handle returned by mmrm_client_register()
+ * @val: out parameter, filled with cur/min/max on success
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int mmrm_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = -1;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_getvalue_request *reg_data;
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+	struct mmrm_vm_msg_q *msg_q;
+
+	msg_q = get_msg_work();
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		goto err_no_mem;
+	}
+	api_msg = &msg_q->m_req->msg;
+	reg_data = &api_msg->data.getval;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_GETVALUE;
+	reg_data->client_id = get_client_handle_2_id(client);
+
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+	if (rc == 0) {
+		val->cur = msg_q->m_resp->msg.data.getval.val.cur;
+		val->max = msg_q->m_resp->msg.data.getval.val.max;
+		val->min = msg_q->m_resp->msg.data.getval.val.min;
+	}
+
+	/* fix: packet was never freed (memory leak) */
+	release_msg_work(msg_q);
+
+err_no_mem:
+	return rc;
+}
+EXPORT_SYMBOL(mmrm_client_get_value);
+
+/**
+ * mmrm_client_check_scaling_supported -- report whether mmrm-managed clock
+ * scaling is available for the given client type.
+ * @client_type: client kind; only MMRM_CLIENT_CLOCK is actually checked here
+ * @client_domain: unused by this FE implementation
+ *
+ * Return: true when supported; false when FE probe has not completed or
+ * clk scaling is disabled.
+ */
+bool mmrm_client_check_scaling_supported(enum mmrm_client_type client_type, u32 client_domain)
+{
+	struct mmrm_vm_fe_priv *fe_data;
+
+	/* probe not finished (or deferred): driver data is not usable yet */
+	if (drv_vm_fe == (void *)-EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm probe_init not done\n", __func__);
+		goto err_exit;
+	}
+
+	fe_data = drv_vm_fe->vm_pvt_data;
+	if (client_type == MMRM_CLIENT_CLOCK) {
+		/* macro jumps to the skip_mmrm label below when
+		 * is_clk_scaling_supported is false
+		 */
+		CHECK_SKIP_MMRM_CLK_RSRC(fe_data);
+	}
+
+	return true;
+err_exit:
+	d_mpr_e("%s: error exit\n", __func__);
+skip_mmrm:
+	return false;
+}
+EXPORT_SYMBOL(mmrm_client_check_scaling_supported);
+
+/**
+ * mmrm_client_msgq_roundtrip_measure -- debug helper: send a NOOP request to
+ * measure the msgq round-trip time.
+ * @val: opaque 32-bit payload echoed to the BE
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int mmrm_client_msgq_roundtrip_measure(u32 val)
+{
+	int rc = 0;
+	struct mmrm_vm_api_request_msg *api_msg;
+	struct mmrm_vm_noop_request *reg_data;
+	struct mmrm_vm_msg_q *msg_q;
+
+	size_t msg_size = sizeof(api_msg->hd) + sizeof(*reg_data);
+
+	msg_q = get_msg_work();
+	/* fix: get_msg_work() can return NULL; it was dereferenced unchecked */
+	if (msg_q == NULL) {
+		d_mpr_e("%s: failed to alloc msg buf\n", __func__);
+		return -ENOMEM;
+	}
+	api_msg = &msg_q->m_req->msg;
+	reg_data = &api_msg->data.lptest;
+
+	api_msg->hd.cmd_id = MMRM_VM_REQUEST_NOOP;
+	reg_data->client_id = val;
+
+	rc = mmrm_fe_append_work_list(msg_q, msg_size);
+
+	release_msg_work(msg_q);
+
+	return rc;
+}

+ 263 - 0
qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_frontend.c

@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/limits.h>
+
+#include <linux/timekeeping.h>
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_debug.h"
+
+extern struct mmrm_vm_driver_data *drv_vm_fe;
+
+/*
+ * mmrm_vm_fe_recv -- dispatch one response packet from the BE: locate the
+ * waiting request by sequence number, copy the result into the caller's
+ * response buffer and wake the caller.
+ * @mmrm_vm: device driver data
+ * @data: response message payload
+ * @size: payload size (header already validated by the msgq layer)
+ */
+void mmrm_vm_fe_recv(struct mmrm_vm_driver_data *mmrm_vm, void *data, size_t size)
+{
+	struct mmrm_vm_api_response_msg *msg = data;
+	struct mmrm_vm_msg_q *node, *temp;
+	int rc = -1;
+	u64 kt2, interval;
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+
+	/* find and unlink the pending request with the matching seq no */
+	mutex_lock(&fe_data->resp_works_lock);
+	list_for_each_entry_safe(node, temp, &fe_data->resp_works, link) {
+		if (msg->hd.seq_no == node->m_req->msg.hd.seq_no) {
+			d_mpr_w("%s: seq no:%d\n", __func__, msg->hd.seq_no);
+			list_del(&node->link);
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&fe_data->resp_works_lock);
+	if (rc != 0) {
+		d_mpr_e("%s: seq no:%d wrong\n", __func__, msg->hd.seq_no);
+		return;
+	}
+
+	d_mpr_w("%s: cmd:%d\n", __func__, msg->hd.cmd_id);
+	switch (msg->hd.cmd_id) {
+	case	MMRM_VM_RESPONSE_REGISTER:
+		node->m_resp->msg.data.reg.client_id = msg->data.reg.client_id;
+		d_mpr_w("%s: client_id:%u\n", __func__, msg->data.reg.client_id);
+		break;
+	case	MMRM_VM_RESPONSE_SETVALUE:
+		node->m_resp->msg.data.setval.val = msg->data.setval.val;
+		break;
+	case	MMRM_VM_RESPONSE_SETVALUE_INRANGE:
+		node->m_resp->msg.data.setval_range.ret_code = msg->data.setval_range.ret_code;
+		break;
+	case	MMRM_VM_RESPONSE_GETVALUE:
+		node->m_resp->msg.data.getval.val = msg->data.getval.val;
+		break;
+	case	MMRM_VM_RESPONSE_DEREGISTER:
+		node->m_resp->msg.data.dereg.ret_code = msg->data.dereg.ret_code;
+		break;
+	case	MMRM_VM_RESPONSE_NOOP:
+		kt2 = ktime_get_ns();
+		interval = kt2 - node->m_req->start_time_ns;
+		/* fix: u64 must be printed with %llu, not %lu (wrong on 32-bit) */
+		d_mpr_w("%s: looptest start:%llu end:%llu interval:%lluns\n",
+			__func__,
+			node->m_req->start_time_ns, kt2,
+			interval);
+		/* NOTE(review): interval is in ns but the accumulator field is
+		 * named looptest_total_us -- confirm the intended units
+		 */
+		fe_data->msgq_rt_stats.looptest_total_us += interval;
+		break;
+	case	MMRM_VM_RESPONSE_INVALID_PKT:
+		/* fix: the format string had %s but no argument (undefined
+		 * behavior); pass __func__
+		 */
+		d_mpr_e("%s: invalid request code\n", __func__);
+		break;
+	default:
+		d_mpr_e("wrong response\n");
+		break;
+	};
+
+	complete(&node->complete);
+}
+
+/*
+ * mmrm_vm_fe_request_send -- fill in the transport header and push one
+ * request packet to the BE over the gunyah message queue.
+ * @mmrm_vm: device data, includes the msgq handle
+ * @msg_pkt: request packet (header + payload)
+ * @msg_size: payload size, excluding the transport header
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int mmrm_vm_fe_request_send(struct mmrm_vm_driver_data *mmrm_vm,
+		struct mmrm_vm_request_msg_pkt *msg_pkt, size_t msg_size)
+{
+	int  rc;
+
+	struct mmrm_vm_msg_hdr *hdr;
+	struct mmrm_vm_gh_msgq_info *pmsg_info = &mmrm_vm->msg_info;
+
+	hdr = (struct mmrm_vm_msg_hdr *)&msg_pkt->hdr;
+	hdr->version = MMRM_VM_VER_1;
+	hdr->type = MMRM_VM_TYPE_DATA;
+	hdr->flags = 0;
+	hdr->size = msg_size;
+
+	if (!pmsg_info->msgq_handle) {
+		d_mpr_e("Failed to send msg, invalid msgq handle\n");
+		return -EINVAL;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		/* fix: size_t takes %zu, not %ld */
+		d_mpr_e("msg size unsupported for msgq: %zu > %d\n", msg_size,
+			GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		return -E2BIG;
+	}
+
+	rc = gh_msgq_send(pmsg_info->msgq_handle, msg_pkt,
+		msg_size + sizeof(msg_pkt->hdr), GH_MSGQ_TX_PUSH);
+
+	return rc;
+}
+
+/*
+ * mmrm_vm_fe_count_clk_clients_frm_dt -- number of clk client entries in the
+ * "mmrm-client-info" DT property (0 when the property is absent).
+ * @pdev: platform device
+ */
+int mmrm_vm_fe_count_clk_clients_frm_dt(struct platform_device *pdev)
+{
+	u32 bytes = 0;
+	u32 entries;
+
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &bytes);
+	entries = bytes / sizeof(struct mmrm_vm_fe_clk_client_desc);
+	d_mpr_h("%s: found %d clk_srcs size %d\n",
+		__func__, entries, bytes);
+
+	return entries;
+}
+
+/*
+ * mmrm_vm_fe_load_clk_rsrc -- parse the "mmrm-client-info" DT property
+ * (u32 triplets: <client_domain, client_id, num_hw_blocks>) into the FE
+ * clk source table.
+ * @mmrm_vm: device data
+ *
+ * Return: 0 on success; -ENOMEM on allocation failure.
+ * NOTE(review): when the property is absent/malformed this also returns 0
+ * (rc is never set on that path) with clk_src_set.count == 0 -- confirm
+ * that callers rely on count, not the return code, in that case.
+ */
+int mmrm_vm_fe_load_clk_rsrc(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int rc = 0, num_clk_src = 0;
+	/* entry_offset: 3 u32 cells per table entry */
+	int c = 0, size_clk_src = 0, entry_offset = 3;
+
+	struct platform_device *pdev;
+	struct mmrm_vm_fe_clk_client_desc *pclk_src;
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+
+	pdev = container_of(fe_data->dev, struct platform_device, dev);
+
+	/* the property must hold a whole, non-zero number of entries */
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &size_clk_src);
+	if ((size_clk_src < sizeof(*fe_data->clk_src_set.clk_src_tbl)) ||
+		(size_clk_src % sizeof(*fe_data->clk_src_set.clk_src_tbl))) {
+		d_mpr_e("%s: invalid size(%d) of clk src table\n",
+			__func__, size_clk_src);
+		fe_data->clk_src_set.count = 0;
+		goto err_load_clk_src_tbl;
+	}
+
+	/* device-managed allocation: freed automatically on driver detach */
+	fe_data->clk_src_set.clk_src_tbl = devm_kzalloc(&pdev->dev,
+			size_clk_src, GFP_KERNEL);
+
+	if (!fe_data->clk_src_set.clk_src_tbl) {
+		d_mpr_e("%s: failed to allocate memory for clk_src_tbl\n",
+				__func__);
+		rc = -ENOMEM;
+		goto err_load_clk_src_tbl;
+	}
+	num_clk_src = size_clk_src / sizeof(struct mmrm_vm_fe_clk_client_desc);
+	fe_data->clk_src_set.count = num_clk_src;
+
+	d_mpr_w("%s: found %d clk_srcs size %d\n",
+			__func__, num_clk_src, size_clk_src);
+
+	/* read each entry's three consecutive u32 cells */
+	for (c = 0; c < num_clk_src; c++) {
+		pclk_src = &fe_data->clk_src_set.clk_src_tbl[c];
+
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset),
+			&pclk_src->client_domain);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+1),
+			&pclk_src->client_id);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+2),
+			&pclk_src->num_hw_blocks);
+	}
+
+	return 0;
+
+err_load_clk_src_tbl:
+	return rc;
+}
+
+/*
+ * mmrm_vm_fe_clk_src_get -- look up the FE clk-source table entry matching a
+ * client description.
+ * @desc: client description (domain + id)
+ *
+ * Return: matching table entry, or NULL when unsupported.
+ */
+struct mmrm_vm_fe_clk_client_desc *mmrm_vm_fe_clk_src_get(struct mmrm_client_desc *desc)
+{
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+	int num_clk_src = fe_data->clk_src_set.count;
+	struct mmrm_vm_fe_clk_client_desc *pclk_src;
+	int i;
+
+	d_mpr_l("%s: num clk src=%d domain:%d id:%d\n", __func__, num_clk_src,
+		desc->client_info.desc.client_domain, desc->client_info.desc.client_id);
+
+	pclk_src = fe_data->clk_src_set.clk_src_tbl;
+	for (i = 0; i < num_clk_src; i++, pclk_src++) {
+		/* NOTE(review): this matches on domain OR id, so an entry with
+		 * the same domain but a different id also matches -- confirm
+		 * this should not be "&&"
+		 */
+		if (pclk_src->client_domain == desc->client_info.desc.client_domain ||
+			pclk_src->client_id == desc->client_info.desc.client_id) {
+			return pclk_src;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * mmrm_vm_fe_init_lookup_table -- allocate the client-id lookup table, one
+ * slot per clk source, with every slot marked free (client_uid == U32_MAX).
+ * @mmrm_vm: device data
+ *
+ * Return: 0 on success, -1 when the allocation fails.
+ */
+int mmrm_vm_fe_init_lookup_table(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	struct mmrm_vm_fe_priv *fe_data = mmrm_vm->vm_pvt_data;
+	struct platform_device *pdev =
+		container_of(fe_data->dev, struct platform_device, dev);
+	struct mmrm_client *tbl;
+	int slot;
+
+	tbl = devm_kzalloc(&pdev->dev,
+		fe_data->clk_src_set.count * sizeof(struct mmrm_client), GFP_KERNEL);
+	if (!tbl)
+		return -1;
+
+	fe_data->client_tbl = tbl;
+	for (slot = 0; slot < fe_data->clk_src_set.count; slot++) {
+		tbl[slot].client_type = 0;
+		tbl[slot].client_uid = U32_MAX;
+	}
+	return 0;
+}
+
+/*
+ * mmrm_vm_fe_clk_print_info -- render the clk source table into a text
+ * buffer (one header line, then one line per entry).
+ * @clk_src_set: clk table to dump
+ * @buf: output buffer
+ * @max_len: output buffer capacity
+ *
+ * Return: number of bytes written.
+ */
+int mmrm_vm_fe_clk_print_info(
+	struct mmrm_vm_fe_clk_src_set *clk_src_set,
+	char *buf, int max_len)
+{
+	struct mmrm_vm_fe_clk_client_desc *entry = clk_src_set->clk_src_tbl;
+	int remaining = max_len;
+	int written, idx;
+
+	written = scnprintf(buf, remaining, "Domain  ID  Num\n");
+	remaining -= written;
+	buf += written;
+
+	for (idx = 0; idx < clk_src_set->count; idx++, entry++) {
+		written = scnprintf(buf, remaining, "%d\t%d\t%d\n",
+			entry->client_domain, entry->client_id, entry->num_hw_blocks);
+		remaining -= written;
+		buf += written;
+	}
+
+	return max_len - remaining;
+}
+
+/*
+ * mmrm_vm_fe_get_client -- map a BE-assigned client id to a local handle;
+ * reuses an existing slot when the id is already known, otherwise claims
+ * the first free slot.
+ * @client_id: id assigned by the BE (U32_MAX means invalid / free)
+ *
+ * Return: handle pointer, or NULL when the id is invalid or the table full.
+ */
+struct mmrm_client *mmrm_vm_fe_get_client(u32 client_id)
+{
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+	struct mmrm_client *slot;
+	int idx;
+
+	if (client_id == U32_MAX)
+		return NULL;
+
+	/* pass 1: id already registered? */
+	for (idx = 0, slot = fe_data->client_tbl; idx < fe_data->clk_src_set.count; idx++, slot++) {
+		if (slot->client_uid == client_id)
+			return slot;
+	}
+
+	/* pass 2: claim the first free slot */
+	for (idx = 0, slot = fe_data->client_tbl; idx < fe_data->clk_src_set.count; idx++, slot++) {
+		if (slot->client_uid == U32_MAX) {
+			slot->client_uid = client_id;
+			return slot;
+		}
+	}
+	return NULL;
+}
+

+ 224 - 0
qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_main.c

@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_msgq.h"
+#include "mmrm_vm_interface.h"
+#include "mmrm_vm_debug.h"
+
+struct mmrm_vm_driver_data *drv_vm_fe = (void *) -EPROBE_DEFER;
+
+static ssize_t dump_clk_info_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int rc;
+
+	struct mmrm_vm_fe_priv *fe_data = drv_vm_fe->vm_pvt_data;
+
+	rc = mmrm_vm_fe_clk_print_info(&fe_data->clk_src_set, buf, MMRM_SYSFS_ENTRY_MAX_LEN);
+	if (rc == 0)
+		d_mpr_e("%s: failed to dump clk info\n", __func__);
+
+	return rc;
+}
+
+ssize_t msgq_send_trigger_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct mmrm_vm_driver_data *priv = dev->driver_data;
+	char send_buf[64] = "test msg";
+	int ret;
+	bool flag;
+
+	ret = strtobool(buf, &flag);
+	if (ret) {
+		dev_err(dev, "invalid user input\n");
+		return -1;
+	}
+	if (flag) {
+		ret = mmrm_vm_msgq_send(priv, send_buf, sizeof(send_buf));
+		if (ret)
+			d_mpr_e("%s:send msgq failed\n", __func__);
+		else
+			d_mpr_e("%s:send msgq success\n", __func__);
+	}
+	return ret ? ret : count;
+}
+
+extern int mmrm_client_msgq_roundtrip_measure(u32 val);
+
+ssize_t msgq_rt_test_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	int ret;
+	long sz, n;
+	struct mmrm_vm_fe_priv *fe_data;
+	struct mmrm_vm_fe_msgq_rt_stats *trip_time;
+
+	if (IS_ERR_OR_NULL(drv_vm_fe)) {
+		return -1;
+	}
+	ret = kstrtol(buf, 10, &sz);
+
+	if (ret) {
+		dev_err(dev, "invalid user input\n");
+		return -1;
+	}
+	if (sz) {
+		d_mpr_w("%s: loop count:%d\n", __func__, sz);
+
+		fe_data = drv_vm_fe->vm_pvt_data;
+		trip_time = &fe_data->msgq_rt_stats;
+		trip_time->looptest_total_us = 0;
+
+		n = sz;
+		while (n-- > 0) {
+			ret = mmrm_client_msgq_roundtrip_measure(0);
+			if (ret) {
+				d_mpr_e("%s:send msgq failed\n", __func__);
+				break;
+			};
+		}
+		if (n <= 0)
+			d_mpr_w("%s: aver: %d\n", __func__, trip_time->looptest_total_us / sz);
+	}
+	return ret ? ret : count;
+}
+
/* RO: dump the clock source table; WO: trigger test sends / round trips. */
static DEVICE_ATTR_RO(dump_clk_info);
static DEVICE_ATTR_WO(msgq_send_trigger);
static DEVICE_ATTR_WO(msgq_rt_test);

/* Debug/test attributes created under the platform device in probe. */
static struct attribute *mmrm_vm_fe_fs_attrs[] = {
	&dev_attr_dump_clk_info.attr,
	&dev_attr_msgq_send_trigger.attr,
	&dev_attr_msgq_rt_test.attr,
	NULL,
};

static struct attribute_group mmrm_vm_fe_fs_attrs_group = {
	.attrs = mmrm_vm_fe_fs_attrs,
};
+
/* Probe: allocate driver state, and — only when the DT declares managed clk
 * clients — bring up the gunyah message queue, the client lookup table and
 * the debug sysfs group. With no managed clocks the probe succeeds but all
 * messaging setup is skipped.
 */
static int mmrm_vm_fe_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mmrm_vm_fe_priv *fe_priv_data;
	int rc = 0;
	u32 clk_clients = 0;

	/* devm allocations are released automatically on probe failure or
	 * device removal.
	 */
	drv_vm_fe = devm_kzalloc(dev, sizeof(*drv_vm_fe), GFP_KERNEL);
	if (!drv_vm_fe)
		return -ENOMEM;

	fe_priv_data = devm_kzalloc(dev, sizeof(*fe_priv_data), GFP_KERNEL);
	if (!fe_priv_data) {
		rc = -ENOMEM;
		goto err_priv_data;
	}

	drv_vm_fe->vm_pvt_data = fe_priv_data;
	fe_priv_data->seq_no = 0;
	fe_priv_data->dev = dev;

	/* check for clk clients needing admission control */
	clk_clients = mmrm_vm_fe_count_clk_clients_frm_dt(pdev);
	if (clk_clients) {
		d_mpr_h("%s: %d clk clients managed for admission control\n",
			__func__, clk_clients);
		fe_priv_data->is_clk_scaling_supported = true;
	} else {
		d_mpr_h("%s: no clk clients managed for admission control\n",
			__func__);
		fe_priv_data->is_clk_scaling_supported = false;
		/* NOTE(review): on this path drvdata is never set, so
		 * remove() sees NULL — msgq_deinit() tolerates that.
		 */
		goto skip_mmrm;
	}

	mutex_init(&fe_priv_data->msg_send_lock);
	dev_set_drvdata(&pdev->dev, drv_vm_fe);

	INIT_LIST_HEAD(&fe_priv_data->resp_works);
	mutex_init(&fe_priv_data->resp_works_lock);

	mmrm_vm_fe_load_clk_rsrc(drv_vm_fe);
	rc = mmrm_vm_msgq_init(drv_vm_fe);
	if (rc != 0) {
		d_mpr_e("%s: failed to msgq init\n",
			__func__);
		goto err_msgq_init;
	}

	rc = mmrm_vm_fe_init_lookup_table(drv_vm_fe);
	if (rc == -1) {
		d_mpr_e("%s: failed to lookup table init\n",
			__func__);
		goto err_lookup_table;
	}

	/* sysfs failure is non-fatal: the driver works without the debug
	 * attributes, so only log.
	 */
	if (sysfs_create_group(&pdev->dev.kobj, &mmrm_vm_fe_fs_attrs_group)) {
		d_mpr_e("%s: failed to create sysfs\n",
			__func__);
	}

	d_mpr_w("msgq probe success");
	return rc;

err_lookup_table:
	mmrm_vm_msgq_deinit(drv_vm_fe);

	/* error labels fall through to skip_mmrm, which returns rc */
err_msgq_init:
err_priv_data:
	/* NOTE(review): drv_vm_fe still points at devm memory that is freed
	 * when probe fails — confirm nothing dereferences it afterwards.
	 */
	d_mpr_e("%s: failed to probe\n", __func__);

skip_mmrm:
	return rc;
}
+
+static int mmrm_vm_fe_driver_remove(struct platform_device *pdev)
+{
+	struct mmrm_vm_driver_data *mmrm_vm = dev_get_drvdata(&pdev->dev);
+
+	mmrm_vm_msgq_deinit(mmrm_vm);
+	return 0;
+}
+
/* Bind against the "qcom,mmrm-vm-fe" device-tree node. */
static const struct of_device_id mmrm_vm_fe_match[] = {
	{ .compatible = "qcom,mmrm-vm-fe" },
	{},
};
MODULE_DEVICE_TABLE(of, mmrm_vm_fe_match);

static struct platform_driver mmrm_vm_fe_driver = {
	.probe = mmrm_vm_fe_driver_probe,
	.driver = {
		.name = "mmrm-vm-fe",
		.of_match_table = mmrm_vm_fe_match,
	},
	.remove = mmrm_vm_fe_driver_remove,
};
+
/* Module entry: register the platform driver. */
static int __init mmrm_vm_fe_module_init(void)
{
	/* NOTE(review): error-level print for a routine init message —
	 * consider a lower severity macro.
	 */
	d_mpr_e("%s:  init start\n", __func__);

	return platform_driver_register(&mmrm_vm_fe_driver);
}
/* subsys_initcall: initialize before ordinary module_init clients that may
 * depend on the FE being present.
 */
subsys_initcall(mmrm_vm_fe_module_init);

static void __exit mmrm_vm_fe_module_exit(void)
{
	platform_driver_unregister(&mmrm_vm_fe_driver);
}
module_exit(mmrm_vm_fe_module_exit);

/* Ensure the gunyah transport is loaded first; msgq registration needs it. */
MODULE_SOFTDEP("pre: gunyah_transport");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Test MSGQ Driver");
MODULE_LICENSE("GPL v2");

+ 182 - 0
qcom/opensource/mmrm-driver/vm/fe/src/mmrm_vm_fe_msgq.c

@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/gunyah/gh_msgq.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+#include <mmrm_vm_interface.h>
+#include "mmrm_vm_fe.h"
+#include "mmrm_vm_debug.h"
+
+void mmrm_vm_fe_msgq_msg_handler(struct work_struct *work)
+{
+	struct mmrm_vm_thread_info *pthread_info =
+		container_of(work, struct mmrm_vm_thread_info, msgq_work.work);
+	struct mmrm_vm_driver_data *mmrm_vm =
+		container_of(pthread_info, struct mmrm_vm_driver_data, thread_info);
+	struct list_head head;
+	struct mmrm_vm_msg *dummy = NULL;
+	struct mmrm_vm_msg *msg;
+	struct mmrm_vm_request_msg_pkt *msg_pkt;
+
+	mutex_lock(&pthread_info->list_lock);
+	list_replace_init(&pthread_info->queued_msg, &head);
+	mutex_unlock(&pthread_info->list_lock);
+
+	list_for_each_entry_safe(msg, dummy, &head, link) {
+		msg_pkt = (struct mmrm_vm_request_msg_pkt *)msg->msg_buf;
+		mmrm_vm_fe_recv(mmrm_vm, &msg_pkt->msg, msg_pkt->hdr.size);
+		list_del(&msg->link);
+		kfree(msg);
+	}
+}
+
+int mmrm_vm_msgq_listener(void *data)
+{
+	struct mmrm_vm_driver_data *mmrm_vm = (struct mmrm_vm_driver_data *)data;
+
+	struct mmrm_vm_gh_msgq_info *pmsg_info = &mmrm_vm->msg_info;
+	struct mmrm_vm_thread_info *pthread_info = &mmrm_vm->thread_info;
+	size_t size;
+	int ret = 0;
+	struct mmrm_vm_msg *msg;
+
+	while (true) {
+		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+		if (!msg)
+			return -ENOMEM;
+
+		ret = gh_msgq_recv(pmsg_info->msgq_handle, msg->msg_buf,
+				GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
+		d_mpr_l("done ret=%d\n", ret);
+
+		if (ret < 0) {
+			kfree(msg);
+			d_mpr_e("gh_msgq_recv failed, rc=%d\n", ret);
+			return -EINVAL;
+		}
+
+		msg->msg_size = size;
+		mutex_lock(&pthread_info->list_lock);
+		list_add_tail(&msg->link, &pthread_info->queued_msg);
+		mutex_unlock(&pthread_info->list_lock);
+
+		queue_delayed_work(pthread_info->msg_workq,
+				 &pthread_info->msgq_work, msecs_to_jiffies(0));
+	}
+	return 0;
+}
+
+int mmrm_vm_msgq_send(struct mmrm_vm_driver_data *mmrm_vm, void *msg, size_t msg_size)
+{
+	int  rc;
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	pmsg_info = &mmrm_vm->msg_info;
+
+	if (!pmsg_info->msgq_handle) {
+		d_mpr_e("Failed to send msg, invalid msgq handle\n");
+		return -EINVAL;
+	}
+
+	if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+		d_mpr_e("msg size unsupported for msgq: %ld > %d\n", msg_size,
+			GH_MSGQ_MAX_MSG_SIZE_BYTES);
+		return -E2BIG;
+	}
+
+	rc = gh_msgq_send(pmsg_info->msgq_handle, msg, msg_size, 0);
+	d_mpr_h("%s: result:%d\n", __func__, rc);
+
+	return rc;
+}
+
+int mmrm_vm_msgq_init(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	int rc = 0;
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+	struct mmrm_vm_thread_info *pthread_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm)) {
+		rc = -EINVAL;
+		goto err;
+	}
+	pmsg_info = &mmrm_vm->msg_info;
+	pthread_info = &mmrm_vm->thread_info;
+
+
+	mutex_init(&pthread_info->list_lock);
+	INIT_LIST_HEAD(&pthread_info->queued_msg);
+
+	pthread_info->msg_workq = create_singlethread_workqueue("vm_fe_message_workq");
+	if (IS_ERR_OR_NULL(pthread_info->msg_workq)) {
+		rc = -1;
+		goto err;
+	}
+
+	INIT_DELAYED_WORK(&pthread_info->msgq_work, mmrm_vm_fe_msgq_msg_handler);
+
+	pmsg_info->msgq_label = GH_MSGQ_LABEL_MMRM;
+	pmsg_info->msgq_handle = gh_msgq_register(pmsg_info->msgq_label);
+	d_mpr_h("%s: label:%d handle:%p\n", __func__,
+		pmsg_info->msgq_label, pmsg_info->msgq_handle);
+
+	if (IS_ERR(pmsg_info->msgq_handle)) {
+		rc = PTR_ERR(pmsg_info->msgq_handle);
+		d_mpr_e("msgq register failed rc:%d\n", rc);
+		goto err_msgq_reg;
+	}
+
+	pthread_info->msgq_listener_thread =
+			kthread_create(mmrm_vm_msgq_listener, mmrm_vm, "mmrm_vm_fe");
+	if (IS_ERR_OR_NULL(pthread_info->msgq_listener_thread)) {
+		rc = PTR_ERR(pmsg_info->msgq_handle);
+		goto err_listener_thread;
+	}
+
+	wake_up_process(pthread_info->msgq_listener_thread);
+
+	d_mpr_w("%s:  msgq init done\n", __func__);
+
+	return rc;
+
+err_listener_thread:
+	gh_msgq_unregister(pmsg_info->msgq_handle);
+	pmsg_info->msgq_handle = NULL;
+err_msgq_reg:
+	destroy_workqueue(pthread_info->msg_workq);
+	pthread_info->msg_workq = NULL;
+err:
+	return rc;
+}
+
+int mmrm_vm_msgq_deinit(struct mmrm_vm_driver_data *mmrm_vm)
+{
+	struct mmrm_vm_gh_msgq_info *pmsg_info;
+	struct mmrm_vm_thread_info *pthread_info;
+
+	if (IS_ERR_OR_NULL(mmrm_vm))
+		return -EINVAL;
+
+	pmsg_info = &mmrm_vm->msg_info;
+	pthread_info = &mmrm_vm->thread_info;
+
+	if (pthread_info->msgq_listener_thread)
+		kthread_stop(pthread_info->msgq_listener_thread);
+
+	if (pmsg_info->msgq_handle)
+		gh_msgq_unregister(pmsg_info->msgq_handle);
+
+	if (pthread_info->msg_workq)
+		destroy_workqueue(pthread_info->msg_workq);
+
+	return 0;
+}

+ 9 - 0
qcom/opensource/mmrm-driver/vm/fe/vm_test/Kbuild

@@ -0,0 +1,9 @@
+
+obj-m += mmrm_vm_fe_test.o
+
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+ifeq ($(CONFIG_ARCH_QTI_VM), y)
+	mmrm_vm_fe_test-objs := mmrm_vm_fe_test_main.o        \
+	mmrm_vm_fe_test_internal.o
+endif
+endif

+ 892 - 0
qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_internal.c

@@ -0,0 +1,892 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "mmrm_test: " fmt
+
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/timekeeping.h>
+
+#include "mmrm_vm_debug.h"
+#include "mmrm_vm_fe_test_internal.h"
+
+#define MMRM_TEST_MAX_CLK_CLIENTS 30
+#define MMRM_TEST_NUM_CASES 3
+
/* Outcome codes for individual FE test steps. */
enum mmrm_test_result {
	TEST_MMRM_SUCCESS = 0,
	TEST_MMRM_FAIL_REGISTER,
	TEST_MMRM_FAIL_SETVALUE,
	TEST_MMRM_FAIL_CLKGET,
};

/* Per-clock test client: descriptor, clock rate per VDD level, and the
 * handle returned by mmrm_client_register().
 */
struct mmrm_test_clk_client {
	struct mmrm_clk_client_desc clk_client_desc;
	unsigned long clk_rate[MMRM_TEST_VDD_LEVEL_MAX];
	struct mmrm_client *client;
};
+
/* Notifier stub passed to mmrm_client_register(); always reports success. */
static int test_mmrm_client_callback(struct mmrm_client_notifier_data *notifier_data)
{
	// TODO: Test callback here
	return 0;
}
+
+static struct mmrm_client *test_mmrm_vm_fe_client_register(struct mmrm_client_desc *desc)
+{
+	struct mmrm_client *client;
+	u64 kt1, kt2;
+
+	if (!desc) {
+		d_mpr_e("%s: Invalid input\n", __func__);
+		return NULL;
+	}
+
+	d_mpr_h("%s: domain(%d) cid(%d) name(%s) type(%d) pri(%d)\n",
+		__func__,
+		desc->client_info.desc.client_domain,
+		desc->client_info.desc.client_id,
+		desc->client_info.desc.name,
+		desc->client_type,
+		desc->priority);
+
+	d_mpr_w("%s: Registering mmrm client %s\n", __func__, desc->client_info.desc.name);
+
+	kt1 = ktime_get_ns();
+	client = mmrm_client_register(desc);
+	kt2 = ktime_get_ns();
+	d_mpr_h("%s: time interval: %lu\n", __func__, kt2 - kt1);
+
+	if (client == NULL) {
+		d_mpr_e("%s: Failed to register mmrm client %s\n",
+			__func__,
+			desc->client_info.desc.name);
+		return NULL;
+	}
+	return client;
+}
+
+static int test_mmrm_vm_fe_client_deregister(struct mmrm_client *client)
+{
+	int rc;
+
+	if (!client) {
+		d_mpr_e("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	d_mpr_h("%s: cuid(%d) Deregistering mmrm client\n", __func__, client->client_uid);
+	rc = mmrm_client_deregister(client);
+	if (rc != 0) {
+		d_mpr_e("%s: cuid(%d) Failed to deregister mmrm client with %d\n",
+			__func__,
+			client->client_uid,
+			rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int test_mmrm_vm_fe_client_set_value(
+	struct mmrm_client *client, struct mmrm_client_data *client_data, unsigned long val)
+{
+	int rc;
+	u64 kt1, kt2;
+
+	if (!client || !client_data) {
+		d_mpr_e("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	d_mpr_h("%s: Setting value(%d) for mmrm client\n",
+		__func__,
+		val);
+	kt1 = ktime_get_ns();
+	rc = mmrm_client_set_value(client, client_data, val);
+	kt2 = ktime_get_ns();
+
+	if (rc != 0) {
+		d_mpr_e("%s: Failed to set value(%d) for mmrm client with %d\n",
+			__func__,
+			val,
+			rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int test_mmrm_vm_fe_client_get_value(struct mmrm_client *client, struct mmrm_client_res_value *val)
+{
+	int rc;
+
+	if (!client || !val) {
+		d_mpr_e("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = mmrm_client_get_value(client, val);
+	if (rc != 0) {
+		d_mpr_e("%s: Failed to get value for mmrm client with %d\n",
+			__func__,
+			rc);
+		return rc;
+	}
+	d_mpr_h("%s: min(%d) cur(%d) max(%d)\n",
+		__func__,
+		val->min,
+		val->cur,
+		val->max);
+	return rc;
+}
+
/* Nominal single-client test: for every clock source, register a client,
 * walk all VDD levels setting the level's rate (reserve-only), read the
 * value back, then reset the rate to 0 and count passes.
 */
void mmrm_vm_fe_client_tests(struct platform_device *pdev)
{
	struct mmrm_client *client; // mmrm client
	struct mmrm_client_data client_data; // mmrm client data
	int level;
	unsigned long val;
	struct clock_rate *p_clk_res;
	struct mmrm_clk_client_desc  *clk_desc;
	struct mmrm_client_desc desc;
	struct mmrm_client_res_value res_val;

	int i, pass_count, count;
	int rc = 0;

	count = get_clock_count();

	d_mpr_w("%s: Running individual client tests : %d\n", __func__, count);

	// Run nominal test for each individual clock source
	for (i = 0, pass_count = 0; i < count; i++) {
		// Create callback used to pass resource data to client
		struct mmrm_client_notifier_data notifier_data = {
			MMRM_CLIENT_RESOURCE_VALUE_CHANGE, // cb_type
			{{0, 0}}, // cb_data (old_val, new_val)
			NULL}; // pvt_data

		// Create client descriptor
		p_clk_res = get_nth_clock(i);

		desc.client_type = MMRM_CLIENT_CLOCK;
		desc.priority = MMRM_CLIENT_PRIOR_HIGH;
		desc.pvt_data = notifier_data.pvt_data;
		desc.notifier_callback_fn = test_mmrm_client_callback;

		clk_desc = &desc.client_info.desc;
		clk_desc->client_domain = p_clk_res->domain;
		clk_desc->client_id = p_clk_res->id;
		strlcpy((char *)clk_desc->name, p_clk_res->name, sizeof(clk_desc->name));

		// Register client
		client = test_mmrm_vm_fe_client_register(&desc);
		if (client == NULL) {
			rc = -EINVAL;
			d_mpr_e("%s: client register failed\n", __func__);
			/* jumps past the reset too: no client to reset */
			goto err_register;
		}
		// Set values (Use reserve only)
		client_data = (struct mmrm_client_data){
			1,
			MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY
		};

		/* walk every VDD level for this clock */
		for (level = 0; level < MMRM_TEST_VDD_LEVEL_MAX; level++) {
			val = p_clk_res->clk_rates[level];
			rc = test_mmrm_vm_fe_client_set_value(client, &client_data, val);
			if (rc != 0) {
				d_mpr_e("%s: client set value failed\n", __func__);
				goto err_setval;
			}
		}

		// Get value
		rc = test_mmrm_vm_fe_client_get_value(client, &res_val);
		if (rc != 0) {
			d_mpr_e("%s: client get value failed\n", __func__);
			goto err_getval;
		}

		d_mpr_h("%s: min:%d max:%d cur:%d\n", __func__, res_val.min, res_val.max, res_val.cur);

	err_setval:
	err_getval:

		// Reset clk rate
		test_mmrm_vm_fe_client_set_value(client, &client_data, 0);

	err_register:

		/* rc reflects the last step attempted in this iteration */
		if (rc == 0)
			pass_count++;
	}

	d_mpr_w("%s: Finish individual client tests (pass / total): (%d / %d)\n",
		__func__, pass_count, count);
}
+
+void mmrm_vm_fe_client_register_tests(struct platform_device *pdev)
+{
+	struct mmrm_client *client; // mmrm client
+	struct clock_rate *p_clk_res;
+	struct mmrm_clk_client_desc  *clk_desc;
+	struct mmrm_client_desc desc;
+
+	d_mpr_h("%s:  client register test\n", __func__);
+
+	// Run nominal test for each individual clock source
+	{
+		// Create callback used to pass resource data to client
+		struct mmrm_client_notifier_data notifier_data = {
+			MMRM_CLIENT_RESOURCE_VALUE_CHANGE, // cb_type
+			{{0, 0}}, // cb_data (old_val, new_val)
+			NULL}; // pvt_data
+
+		// Create client descriptor
+		p_clk_res = get_nth_clock(0);
+
+		desc.client_type = MMRM_CLIENT_CLOCK;
+		desc.priority = MMRM_CLIENT_PRIOR_HIGH;
+		desc.pvt_data = notifier_data.pvt_data;
+		desc.notifier_callback_fn = test_mmrm_client_callback;
+
+		clk_desc = &desc.client_info.desc;
+		clk_desc->client_domain = p_clk_res->domain;
+		clk_desc->client_id = p_clk_res->id;
+		strlcpy((char *)clk_desc->name, p_clk_res->name, sizeof(clk_desc->name));
+
+		// Register client
+		client = test_mmrm_vm_fe_client_register(&desc);
+		if (client == NULL) {
+			d_mpr_e("%s: client register fails\n", __func__);
+		} else
+			d_mpr_w("%s: client register successful\n", __func__);
+	}
+}
+
+// for camera ife/ipe/bps at nom
+// all camera +cvp at nom
+// all camera +cvp + mdss_mdp at nom
+// all camera + cvp +mdss_mdp +video at nom
+// all camera at nom + mdp/cvp/video svsl1
+// mdp at svsl1 + video at nom : voltage corner scaling
+
+// mdp at svsl1 + video at svsl1 + cvp at svsl1 + camera at nom
+
+
+// for camera ife/ipe/bps at nom
+//
+static test_case_info_t test_case_1[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// all camera +cvp at nom
+//
+static test_case_info_t test_case_4[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+
+// all camera +cvp + mdss_mdp at nom
+//
+static test_case_info_t test_case_5[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// all camera + cvp +mdss_mdp +video at nom
+//
+static test_case_info_t test_case_6[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// all camera at nom
+//
+static test_case_info_t test_case_7[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//	ife0, ife1 (lowsvs) + ipe (svs) + bps (nom)
+//	ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+//	ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+//	ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+//	ife0, ife1, ife2 (svs) + ipe (nom) + bps (nom) + sbi (svs)
+//	ife0, ife1 (svs) , ife2 (lowsvs) +
+//	sfe0 (svs) + sfe1(svs) + ipe (nom) + bps (nom) + sbi (svs)
+//	ife0, ife1 (svs) + ipe (nom) + bps (nom) + sbi (svs)
+//	ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+//	ife0, ife1 (lowsvs) + ipe (svs) + bps (nom)
+
+static test_case_info_t test_case_11[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+static test_case_info_t test_case_12[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+static test_case_info_t test_case_13[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+
+//		ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+static test_case_info_t test_case_14[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1, ife2 (svs) + ipe (nom) + bps (nom) + sbi (svs)
+static test_case_info_t test_case_15[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1 (svs) , ife2 (lowsvs) + sfe0 (svs) + sfe1(svs) + ipe (nom) +
+//		bps (nom) + sbi (svs)
+static test_case_info_t test_case_16[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1 (svs) + ipe (nom) + bps (nom) + sbi (svs)
+static test_case_info_t test_case_17[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+//		ife0, ife1 (lowsvs) + ipe (nom) + bps (nom) + sbi (lowsvs)
+static test_case_info_t test_case_18[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// throttle video
+// bps(nom) + ipe(nom) +sfe0(nom) + sfe1(nom) +camnoc(nom) + ife0(nom) + csid0(nom)+ ife1(nom) + csid1(nom) + ife2(svs)
+//
+//
+static test_case_info_t test_case_20[] = {
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_SVS},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// throttle ipe
+// bps(nom) + ipe(nom) +sfe0(nom) + sfe1(nom) +camnoc(nom) + ife0(nom) + csid0(nom)+ ife1(nom) + csid1(nom) + ife2(svs)
+//
+//
+
+static test_case_info_t test_case_21[] = {
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_SVS_L1},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_SVS_L1},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// Reinstate throttled client. Moved below clients to LOW SVS to make sufficient available power
+// for throttled client to reinstate
+static test_case_info_t test_case_22[] = {
+	{"video_cc_mvs1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"disp_cc_mdss_mdp_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// all camera +cam_cc_csid at nom
+//
+
+static test_case_info_t test_case_9[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 0, 3},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// all camera at nom + cam_cc_csid
+//
+// Same as test_case_9 but additionally loads the SFE clocks
+// (cam_cc_sfe_0/1); everything at NOM, cam_cc_csid across 3 HW blocks.
+static test_case_info_t test_case_10[] = {
+	{"cam_cc_ife_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_2_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_sfe_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ife_lite_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_ipe_nps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_bps_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 0, 3},
+
+	{"cam_cc_jpeg_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_camnoc_axi_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_icp_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cphy_rx_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_csi0phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi1phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi2phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi3phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi4phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_csi5phytimer_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"cam_cc_cci_0_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_cci_1_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_slow_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+	{"cam_cc_fast_ahb_clk_src", MMRM_TEST_VDD_LEVEL_NOM},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+/*
+ * Kalama concurrent-client suite, consumed by
+ * test_mmrm_concurrent_client_cases(). Entries reference test-case
+ * tables defined elsewhere in this file (test_case_11..22 among them).
+ */
+test_case_info_t  *kalama_testcases[] = {
+	test_case_1,
+	test_case_4,
+	test_case_5,
+	test_case_6,
+	test_case_7,
+	test_case_9,
+	test_case_10,
+	test_case_11,
+	test_case_12,
+	test_case_13,
+	test_case_14,
+	test_case_15,
+	test_case_16,
+	test_case_17,
+	test_case_18,
+	test_case_20,
+	test_case_21,
+	test_case_22,
+};
+
+/* Element count of kalama_testcases, exported for the test driver. */
+int kalama_testcases_count = sizeof(kalama_testcases)/sizeof(kalama_testcases[0]);
+
+// Corner case: same client at a fixed NOM corner while the HW-block
+// count ramps 3 -> 2 -> 1 -> 2 -> 3 (flags=1 on every entry).
+static test_case_info_t cornercases_1[] = {
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 3},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 2},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 1},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 2},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 3},
+
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+// Corner case: vary the voltage corner (LOW_SVS -> SVS_L1 -> NOM and
+// back) and the HW-block count simultaneously for the same client.
+static test_case_info_t cornercases_2[] = {
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS, 1, 3},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_LOW_SVS, 1, 2},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_SVS_L1, 1, 1},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 2},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_NOM, 1, 3},
+	{"cam_cc_csid_clk_src", MMRM_TEST_VDD_LEVEL_SVS_L1, 1, 1},
+	{"", MMRM_TEST_VDD_LEVEL_MAX}
+};
+
+/* Volt-corner switching suite for Kalama, consumed by
+ * test_mmrm_switch_volt_corner_client_testcases().
+ */
+test_case_info_t *kalama_cornercase_testcases[] = {
+	cornercases_1,
+	cornercases_2,
+};
+
+/* Element count of kalama_cornercase_testcases. */
+int kalama_cornercase_testcases_count = sizeof(kalama_cornercase_testcases)/sizeof(kalama_cornercase_testcases[0]);
+
+/*
+ * Build an MMRM clock-client descriptor from one test-case entry and
+ * register it with the FE stub. The handle returned by registration
+ * (which may be NULL/ERR_PTR on failure) is stored in pcase->client
+ * for the caller to validate. Always returns TEST_MMRM_SUCCESS.
+ */
+int test_mmrm_testcase_client_register(struct platform_device *pdev,
+	test_case_info_t *pcase)
+{
+	/* Positional init: type, clock desc (filled below), priority,
+	 * private data, notifier callback.
+	 */
+	struct mmrm_client_desc desc = {
+		MMRM_CLIENT_CLOCK,
+		{},
+		MMRM_CLIENT_PRIOR_HIGH,
+		NULL,
+		test_mmrm_client_callback
+	};
+
+	desc.client_info.desc.client_domain = pcase->client_domain;
+	desc.client_info.desc.client_id = pcase->client_id;
+	strlcpy((char *)(desc.client_info.desc.name), pcase->name,
+		MMRM_CLK_CLIENT_NAME_SIZE);
+
+	pcase->client = test_mmrm_vm_fe_client_register(&desc);
+
+	return TEST_MMRM_SUCCESS;
+}
+
+/*
+ * Run a single test table: register every client and set its clock
+ * rate for the requested vdd corner (reserve-only), stopping the
+ * forward pass at the first failure. A second pass then unwinds: every
+ * entry with a valid client handle has its rate set to 0 and is
+ * deregistered. Returns TEST_MMRM_SUCCESS or a negative TEST_MMRM_*
+ * failure code from the forward pass.
+ */
+int test_mmrm_run_one_case(struct platform_device *pdev,
+	test_case_info_t *pcase)
+{
+	struct mmrm_client_data    client_data;
+	unsigned long val;
+	test_case_info_t *p = pcase;
+	int rc = TEST_MMRM_SUCCESS;
+
+	client_data = (struct mmrm_client_data){0, MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY};
+
+	/* Forward pass: vdd_level == MMRM_TEST_VDD_LEVEL_MAX is the
+	 * table sentinel.
+	 */
+	while (p->vdd_level != MMRM_TEST_VDD_LEVEL_MAX) {
+		// Rate for this entry's voltage corner (filled in by
+		// test_mmrm_populate_testcase from the DT table).
+		val = p->clk_rate[p->vdd_level];
+		rc = test_mmrm_testcase_client_register(pdev, p);
+		if ((rc != TEST_MMRM_SUCCESS) || (IS_ERR_OR_NULL(p->client))) {
+			d_mpr_e("%s: client(%s) fail register\n", __func__,
+				p->name);
+			rc = -TEST_MMRM_FAIL_REGISTER;
+			break;
+		}
+
+		// Entries that never set num_hw_blocks default to one block.
+		if (p->num_hw_blocks == 0) {
+			client_data.num_hw_blocks = 1;
+		} else {
+			client_data.num_hw_blocks = p->num_hw_blocks;
+		}
+
+		d_mpr_h("%s: domain:%d  csid:%d num_hw_block:%d\n",
+			__func__,
+			p->client_domain,
+			p->client_id,
+			client_data.num_hw_blocks);
+
+		if (test_mmrm_vm_fe_client_set_value(p->client, &client_data, val) != 0) {
+			rc = -TEST_MMRM_FAIL_SETVALUE;
+			break;
+		}
+
+		p++;
+	}
+
+	/* Unwind pass. NOTE(review): client_data still carries the
+	 * num_hw_blocks of the last entry processed above — confirm the
+	 * FE ignores it for the rate-0 release call.
+	 */
+	p = pcase;
+	while (p->vdd_level != MMRM_TEST_VDD_LEVEL_MAX) {
+		if (!IS_ERR_OR_NULL(p->client)) {
+			test_mmrm_vm_fe_client_set_value(p->client, &client_data, 0);
+			test_mmrm_vm_fe_client_deregister(p->client);
+		}
+		p++;
+	}
+
+	return rc;
+}
+
+/*
+ * Resolve every entry of every test table against the DT clock table:
+ * fill in client_domain, client_id and the per-corner clk_rate[]
+ * values by clock name. The client_id of the very first entry doubles
+ * as an "already populated" flag — non-zero means a previous run did
+ * the work, and the function returns 0 immediately.
+ * Returns the number of tables processed.
+ */
+int test_mmrm_populate_testcase(struct platform_device *pdev,
+	test_case_info_t **pcase, int count)
+{
+	int i;
+	test_case_info_t **p = pcase, *ptr;
+	struct clock_rate *p_clk_rate;
+
+	if (pcase[0]->client_id != 0)
+		return 0;
+
+	for (i = 0; i < count; i++, p++) {
+		ptr = *p;
+		while (ptr->vdd_level != MMRM_TEST_VDD_LEVEL_MAX) {
+			// Entries whose name is not in DT are left unresolved.
+			p_clk_rate = find_clk_by_name(ptr->name);
+			if (p_clk_rate != NULL) {
+				ptr->client_domain = p_clk_rate->domain;
+				ptr->client_id = p_clk_rate->id;
+				// Both arrays are u32[MMRM_TEST_VDD_LEVEL_MAX].
+				memcpy(ptr->clk_rate, p_clk_rate->clk_rates, sizeof(ptr->clk_rate));
+
+				if (ptr->num_hw_blocks == 0)
+					ptr->num_hw_blocks = 1;
+			}
+			ptr++;
+		}
+	}
+	return i;
+}
+
+/*
+ * Run all concurrent-client test tables, collect per-table results,
+ * and log a pass/total summary plus the failure reason of each failed
+ * table.
+ */
+void test_mmrm_concurrent_client_cases(struct platform_device *pdev,
+	test_case_info_t **testcases, int count)
+{
+	test_case_info_t **p = testcases;
+	int i;
+	int size, rc, pass = 0;
+	int *result_ptr;
+
+	d_mpr_h("%s: Started\n", __func__);
+
+	size = sizeof(int) * count;
+
+	test_mmrm_populate_testcase(pdev, testcases, count);
+
+	// One result slot per table. (kzalloc returns NULL on failure,
+	// never an ERR_PTR, so a plain NULL check would suffice here.)
+	result_ptr = kzalloc(size, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(result_ptr)) {
+		d_mpr_e("%s: failed to allocate memory for concurrent client test\n",
+			__func__);
+		goto err_fail_alloc_result_ptr;
+	}
+
+	p = testcases;
+	for (i = 0; i < count; i++, p++) {
+		d_mpr_e("%s: testcase: %d -----\n", __func__, i);
+		rc = test_mmrm_run_one_case(pdev, *p);
+		result_ptr[i] = rc;
+		if (rc == TEST_MMRM_SUCCESS)
+			pass++;
+	}
+
+	d_mpr_w("%s: Finish concurrent client tests (pass / total): (%d / %d)\n",
+			__func__, pass, count);
+
+	for (i = 0; i < count; i++) {
+		if (result_ptr[i] != TEST_MMRM_SUCCESS)
+			d_mpr_w("%s: Failed client test# %d reason %d\n",
+				__func__, i, result_ptr[i]);
+	}
+	kfree(result_ptr);
+
+err_fail_alloc_result_ptr:
+	;
+
+}
+
+/*
+ * Run the voltage-corner switching tables (same flow as
+ * test_mmrm_concurrent_client_cases): execute each table, tally
+ * passes, and log per-table failure reasons.
+ */
+void test_mmrm_switch_volt_corner_client_testcases(struct platform_device *pdev,
+	test_case_info_t **testcases, int count)
+{
+	test_case_info_t **p = testcases;
+	int i;
+	int size, rc, pass = 0;
+	int *result_ptr;
+
+	d_mpr_h("%s: Started\n", __func__);
+
+	size = sizeof(int) * count;
+
+	test_mmrm_populate_testcase(pdev, testcases, count);
+
+	// One result slot per table.
+	result_ptr = kzalloc(size, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(result_ptr)) {
+		d_mpr_h("%s: failed to allocate memory for concurrent client test\n",
+			__func__);
+		goto err_fail_alloc_result_ptr;
+	}
+
+	p = testcases;
+	for (i = 0; i < count; i++, p++) {
+		d_mpr_h("%s: switch volt corner testcase: %d -----\n", __func__, i);
+		rc = test_mmrm_run_one_case(pdev, *p);
+		result_ptr[i] = rc;
+		if (rc == TEST_MMRM_SUCCESS)
+			pass++;
+	}
+
+	d_mpr_w("%s: Finish switch volt corner client tests (pass / total): (%d / %d)\n",
+			__func__, pass, count);
+
+	for (i = 0; i < count; i++) {
+		if (result_ptr[i] != TEST_MMRM_SUCCESS)
+			d_mpr_w("%s: Failed client test# %d reason %d\n",
+				__func__, i, result_ptr[i]);
+	}
+	kfree(result_ptr);
+
+err_fail_alloc_result_ptr:
+	;
+}
+

+ 66 - 0
qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_internal.h

@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef TEST_MMRM_TEST_INTERNAL_H_
+#define TEST_MMRM_TEST_INTERNAL_H_
+
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+/* Upper bound for one sysfs show() buffer.
+ * (Was defined twice in this header; the duplicate is removed.)
+ */
+#define MMRM_SYSFS_ENTRY_MAX_LEN     PAGE_SIZE
+
+struct mmrm_test_desc {
+	struct mmrm_test_clk_client  *clk_client;
+	u32 clk_rate_id;
+};
+
+/* Voltage corners; also used to index clock_rate::clk_rates[].
+ * MMRM_TEST_VDD_LEVEL_MAX doubles as array size and table sentinel.
+ */
+enum mmrm_vdd_level {
+	MMRM_TEST_VDD_LEVEL_LOW_SVS = 0,
+	MMRM_TEST_VDD_LEVEL_SVS,
+	MMRM_TEST_VDD_LEVEL_SVS_L1,
+	MMRM_TEST_VDD_LEVEL_NOM,
+	MMRM_TEST_VDD_LEVEL_TURBO,
+	MMRM_TEST_VDD_LEVEL_MAX
+};
+
+/* One clock parsed from DT: identity plus one rate per voltage corner. */
+struct clock_rate {
+	const char *name;
+	u32   domain;
+	u32   id;
+	u32   clk_rates[MMRM_TEST_VDD_LEVEL_MAX];
+};
+
+/* One test-table entry. An entry with vdd_level == MMRM_TEST_VDD_LEVEL_MAX
+ * terminates a table. client_domain/client_id/clk_rate are resolved at
+ * run time from the DT clock table.
+ */
+typedef struct test_case_info_s {
+	const char name[MMRM_CLK_CLIENT_NAME_SIZE];
+	int  vdd_level;
+	u32 flags;
+	u32 num_hw_blocks;
+	u32 client_domain;
+	u32 client_id;
+	u32 clk_rate[MMRM_TEST_VDD_LEVEL_MAX];
+	struct mmrm_client *client;
+} test_case_info_t;
+
+extern test_case_info_t  *kalama_testcases[];
+extern int kalama_testcases_count;
+
+extern test_case_info_t *kalama_cornercase_testcases[];
+extern int kalama_cornercase_testcases_count;
+
+void mmrm_vm_fe_client_tests(struct platform_device *pdev);
+void test_mmrm_single_client_cases(struct platform_device *pdev,
+	int index, int count);
+void test_mmrm_concurrent_client_cases(struct platform_device *pdev,
+	test_case_info_t **testcases, int count);
+struct clock_rate *find_clk_by_name(const char *name);
+struct clock_rate *get_nth_clock(int nth);
+void test_mmrm_switch_volt_corner_client_testcases(struct platform_device *pdev,
+	test_case_info_t **testcases, int count);
+int get_clock_count(void);
+void mmrm_vm_fe_client_register_tests(struct platform_device *pdev);
+
+#endif  // TEST_MMRM_TEST_INTERNAL_H_

+ 397 - 0
qcom/opensource/mmrm-driver/vm/fe/vm_test/mmrm_vm_fe_test_main.c

@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "mmrm_test: " fmt
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+#include <soc/qcom/socinfo.h>
+
+#include "mmrm_vm_fe_test_internal.h"
+#include "mmrm_vm_debug.h"
+
+#define MODULE_NAME "mmrm_vm_fe_test"
+
+/* SoC ids this test suite has clock tables for. */
+enum supported_soc_ids {
+	SOC_KALAMA_ID = 519			/* KAILUA */
+};
+
+/* DT-parsed clock table plus the owning platform device. */
+struct mmrm_test_platform_resources {
+	struct platform_device *pdev;
+	struct clock_rate *clk_rate_tbl;
+	u32 count;
+};
+
+struct mmrm_test_driver_data {
+	struct mmrm_test_platform_resources clk_res;
+};
+
+/* Sentinel value until probe() allocates the real driver data. */
+static struct mmrm_test_driver_data *test_drv_data = (void *) -EPROBE_DEFER;
+
+/* Default verbosity mask for the d_mpr_* logging macros. */
+int mmrm_vm_debug = MMRM_VM_ERR | MMRM_VM_WARN | MMRM_VM_PRINTK;
+
+/*
+ * Parse the "clock-names" / "clock_rates" DT properties into
+ * dt_res->clk_rate_tbl. "clock_rates" is a flat u32 array with 7 cells
+ * per clock: [domain, id, rate@LOW_SVS, rate@SVS, rate@SVS_L1,
+ * rate@NOM, rate@TURBO].
+ *
+ * Returns 0 on success (including the "no clocks described" case,
+ * which leaves dt_res->count == 0), -ENOMEM on allocation failure.
+ * Missing/short properties leave the zero-initialized defaults.
+ */
+int mmrm_vm_fe_load_mmrm_test_table(
+	struct mmrm_test_platform_resources *dt_res)
+{
+	int rc = 0, num_clock_names = 0, c = 0, k;
+	struct platform_device *pdev = dt_res->pdev;
+	int   entry_offset = 0;
+	struct clock_rate *clk_rate;
+
+	num_clock_names = of_property_count_strings(pdev->dev.of_node,
+			"clock-names");
+	if (num_clock_names <= 0) {
+		/* No clocks described: empty table, not an error. */
+		dt_res->count = 0;
+		goto err_load_corner_tbl;
+	}
+	d_mpr_h("%s: count =%d\n", __func__, num_clock_names);
+
+	/* devm_kcalloc: zeroed, multiplication overflow-checked, and
+	 * released automatically when the device detaches (so nobody
+	 * may kfree() this table by hand).
+	 */
+	dt_res->clk_rate_tbl = devm_kcalloc(&pdev->dev, num_clock_names,
+		sizeof(*dt_res->clk_rate_tbl), GFP_KERNEL);
+	if (!dt_res->clk_rate_tbl) {
+		rc = -ENOMEM;
+		goto err_load_corner_tbl;
+	}
+	dt_res->count = num_clock_names;
+
+	clk_rate = dt_res->clk_rate_tbl;
+	for (c = 0; c < num_clock_names; c++, clk_rate++) {
+		of_property_read_string_index(pdev->dev.of_node,
+			"clock-names", c, &clk_rate->name);
+	}
+
+	clk_rate = dt_res->clk_rate_tbl;
+	for (c = 0; c < num_clock_names; c++, entry_offset += 7, clk_rate++) {
+		of_property_read_u32_index(pdev->dev.of_node,
+			"clock_rates", entry_offset, &clk_rate->domain);
+
+		of_property_read_u32_index(pdev->dev.of_node,
+			"clock_rates", entry_offset + 1, &clk_rate->id);
+
+		/* One rate per voltage corner (cells +2 .. +6). */
+		for (k = 0; k < MMRM_TEST_VDD_LEVEL_MAX; k++)
+			of_property_read_u32_index(pdev->dev.of_node,
+				"clock_rates", entry_offset + 2 + k,
+				&clk_rate->clk_rates[k]);
+	}
+
+	/* Dump the parsed table for debugging. */
+	clk_rate = dt_res->clk_rate_tbl;
+	for (c = 0; c < num_clock_names; c++, clk_rate++) {
+		d_mpr_h("clock name:%s, %d, %d, %d, %d, %d, %d, %d\n", clk_rate->name,
+			clk_rate->domain, clk_rate->id,
+			clk_rate->clk_rates[0], clk_rate->clk_rates[1],
+			clk_rate->clk_rates[2], clk_rate->clk_rates[3],
+			clk_rate->clk_rates[4]);
+	}
+
+	return 0;
+
+err_load_corner_tbl:
+	return rc;
+}
+
+/*
+ * Render every entry of the parsed clock table into buf, bounded by
+ * max_len bytes. Returns the number of bytes actually written.
+ */
+int mmrm_clk_print_info(
+	struct mmrm_test_platform_resources *dt_res,
+	char *buf, int max_len)
+{
+	struct clock_rate *entry = dt_res->clk_rate_tbl;
+	int remaining = max_len;
+	u32 idx;
+
+	for (idx = 0; idx < dt_res->count; idx++, entry++) {
+		/* scnprintf never writes past 'remaining' and returns
+		 * the number of bytes it actually emitted.
+		 */
+		int n = scnprintf(buf, remaining,
+			"clock name:%s, %d, %d, %d, %d, %d, %d, %d\n",
+			entry->name, entry->domain, entry->id,
+			entry->clk_rates[0], entry->clk_rates[1],
+			entry->clk_rates[2], entry->clk_rates[3],
+			entry->clk_rates[4]);
+
+		remaining -= n;
+		buf += n;
+	}
+
+	return max_len - remaining;
+}
+
+/* Linear search of the DT clock table by name; NULL when not found. */
+struct clock_rate *find_clk_by_name(const char *name)
+{
+	struct mmrm_test_platform_resources *res = &test_drv_data->clk_res;
+	struct clock_rate *entry = res->clk_rate_tbl;
+	u32 idx;
+
+	for (idx = 0; idx < res->count; idx++, entry++) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/* Number of clocks parsed from the DT clock table. */
+int get_clock_count(void)
+{
+	return test_drv_data->clk_res.count;
+}
+
+/* Return the nth DT clock table entry. No bounds check: the caller
+ * must keep nth < get_clock_count().
+ */
+struct clock_rate *get_nth_clock(int nth)
+{
+	struct mmrm_test_platform_resources  *res = &test_drv_data->clk_res;
+
+	return &(res->clk_rate_tbl[nth]);
+}
+
+/*
+ * Validate the platform device and load the DT clock table into the
+ * driver data. Returns 0 on success, -EINVAL for a missing device or
+ * of_node, -EPROBE_DEFER if the driver data has not been allocated
+ * yet, or the error from mmrm_vm_fe_load_mmrm_test_table().
+ */
+int mmrm_vm_fe_test_read_platform_resources(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	d_mpr_h("%s: mmrm_test_read_platform_resources =%p test_drv_data:%p\n",
+		__func__, pdev, test_drv_data);
+
+	if (pdev == NULL || pdev->dev.of_node == NULL) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (test_drv_data == (void *) -EPROBE_DEFER) {
+		d_mpr_e("%s: mmrm_test_read_platform_resources\n", __func__);
+		/* Driver data not initialized yet: report deferral.
+		 * (Previously fell through and returned 0, falsely
+		 * signalling success with nothing loaded.)
+		 */
+		rc = -EPROBE_DEFER;
+		goto exit;
+	}
+
+	rc = mmrm_vm_fe_load_mmrm_test_table(&test_drv_data->clk_res);
+exit:
+	return rc;
+}
+
+/*
+ * Dispatch the selected test groups for the detected SoC:
+ * bit0 = client tests, bit1 = concurrent cases, bit2 = volt-corner
+ * switch cases. Returns 0, or -ENODEV for an unsupported SoC.
+ *
+ * NOTE(review): soc_id is unconditionally overwritten with
+ * SOC_KALAMA_ID below — debug leftover that forces the Kalama tables
+ * on every target. Confirm intent before relying on the real socinfo.
+ */
+static int mmrm_test(struct platform_device *pdev, int flags)
+{
+	int soc_id;
+
+	// Get socid to get known mmrm configurations
+	soc_id = socinfo_get_id();
+	d_mpr_e("%s: soc id: %d flags=%x\n", __func__, soc_id, flags);
+	soc_id = SOC_KALAMA_ID;
+	d_mpr_e("%s: soc id: %d flags=%x\n", __func__, soc_id, flags);
+
+	switch (soc_id) {
+	case SOC_KALAMA_ID:
+		if (flags & 1)
+			mmrm_vm_fe_client_tests(pdev);
+		if (flags & 2)
+			test_mmrm_concurrent_client_cases(pdev, kalama_testcases, kalama_testcases_count);
+		if (flags & 4)
+			test_mmrm_switch_volt_corner_client_testcases(pdev, kalama_cornercase_testcases, kalama_cornercase_testcases_count);
+		break;
+	default:
+		d_mpr_e("%s: Not supported for soc_id %d\n", __func__, soc_id);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* sysfs "debug" show: report the current debug mask in hex. */
+static ssize_t mmrm_vm_fe_sysfs_debug_get(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int len = scnprintf(buf, MMRM_SYSFS_ENTRY_MAX_LEN, "0x%x\n",
+		mmrm_vm_debug);
+
+	pr_info("%s: 0x%04X\n", __func__, mmrm_vm_debug);
+
+	return len;
+}
+
+/* sysfs "debug" store: parse a hex mask and update mmrm_vm_debug.
+ * A malformed value is silently ignored; count is always consumed.
+ */
+static ssize_t mmrm_vm_fe_sysfs_debug_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long mask;
+
+	if (kstrtoul(buf, 16, &mask) == 0)
+		mmrm_vm_debug = mask;
+
+	return count;
+}
+
+/* sysfs "dump_clk_info" show: render the parsed clock table into buf. */
+static ssize_t dump_clk_info_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int written = mmrm_clk_print_info(&test_drv_data->clk_res, buf,
+		MMRM_SYSFS_ENTRY_MAX_LEN);
+
+	if (written == 0)
+		d_mpr_e("%s: failed to dump clk info\n", __func__);
+
+	return written;
+}
+
+/*
+ * sysfs "test_trigger" store: parse a hex flags mask and run the
+ * selected tests. Bit 7 selects the register-stress tests, otherwise
+ * the mask is forwarded to mmrm_test(). Returns count, or -EINVAL for
+ * unparsable input.
+ */
+ssize_t test_trigger_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	int ret;
+	unsigned long flags;
+
+	d_mpr_e("%s: start ...\n", __func__);
+
+	ret = kstrtoul(buf, 16, &flags);
+	if (ret) {
+		d_mpr_e("invalid user input\n");
+		/* was "return -1", which userspace sees as -EPERM;
+		 * report the real cause instead.
+		 */
+		return -EINVAL;
+	}
+
+	if  (flags & 0x80)
+		mmrm_vm_fe_client_register_tests(pdev);
+	else
+		mmrm_test(pdev, flags);
+
+	return count;
+}
+
+/* sysfs attributes: dump_clk_info (RO), debug (RW, 0644),
+ * test_trigger (WO); grouped and created on the platform device in
+ * probe().
+ */
+static DEVICE_ATTR_RO(dump_clk_info);
+
+static DEVICE_ATTR(debug, 0644,
+	mmrm_vm_fe_sysfs_debug_get,
+	mmrm_vm_fe_sysfs_debug_set);
+
+static DEVICE_ATTR_WO(test_trigger);
+
+static struct attribute *mmrm_vm_fe_test_fs_attrs[] = {
+	&dev_attr_debug.attr,
+	&dev_attr_dump_clk_info.attr,
+	&dev_attr_test_trigger.attr,
+	NULL,
+};
+
+static struct attribute_group mmrm_vm_fe_test_fs_attrs_group = {
+	.attrs = mmrm_vm_fe_test_fs_attrs,
+};
+
+/*
+ * Probe: allocate driver data, parse the DT clock table, and publish
+ * the sysfs test interface. Returns 0 on success (or when MMRM
+ * scaling is unsupported, which is not an error), -ENODEV for a
+ * non-matching node, -ENOMEM on allocation failure, or the error from
+ * reading platform resources.
+ *
+ * Previously every failure path returned 0 (or 1), leaving the driver
+ * bound with no usable state; errors are now propagated and the
+ * global sentinel restored on failure.
+ */
+static int mmrm_vm_fe_test_probe(struct platform_device *pdev)
+{
+	bool is_mmrm_supported = false;
+	int rc;
+
+	// Check if of_node is found
+	if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mmrm-vm-fe-test")) {
+		d_mpr_e("No compatible device node\n");
+		return -ENODEV;
+	}
+
+	is_mmrm_supported = mmrm_client_check_scaling_supported(MMRM_CLIENT_CLOCK, 0);
+	if (!is_mmrm_supported) {
+		d_mpr_e("%s: MMRM not supported on %s\n", __func__, socinfo_get_id_string());
+		return 0;
+	}
+
+	test_drv_data = kzalloc(sizeof(*test_drv_data), GFP_KERNEL);
+	if (!test_drv_data) {
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+	test_drv_data->clk_res.pdev = pdev;
+	dev_set_drvdata(&pdev->dev, test_drv_data);
+
+	rc = mmrm_vm_fe_test_read_platform_resources(pdev);
+	if (rc) {
+		d_mpr_e("%s: unable to read platform resources for mmrm\n",
+			__func__);
+		goto err_read;
+	}
+	d_mpr_e("%s: Validating mmrm on target\n", __func__);
+
+	/* sysfs failure is logged but not fatal: the module can still
+	 * be driven through other means.
+	 */
+	if (sysfs_create_group(&pdev->dev.kobj, &mmrm_vm_fe_test_fs_attrs_group)) {
+		d_mpr_e("%s: failed to create sysfs\n",
+			__func__);
+	}
+
+	return 0;
+
+err_read:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(test_drv_data);
+	test_drv_data = (void *) -EPROBE_DEFER;
+err_no_mem:
+	return rc;
+}
+
+/*
+ * Remove: tear down the sysfs group and release the driver data.
+ * Returns 0, or -EINVAL for a missing device / driver data.
+ */
+int mmrm_vm_fe_test_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	if (!pdev) {
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	test_drv_data = dev_get_drvdata(&pdev->dev);
+	if (!test_drv_data) {
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	/* Remove the attributes created in probe() first, so no sysfs
+	 * handler can race with the frees below.
+	 */
+	sysfs_remove_group(&pdev->dev.kobj, &mmrm_vm_fe_test_fs_attrs_group);
+
+	/*
+	 * clk_rate_tbl was allocated with devm_kzalloc() and is released
+	 * automatically when the device detaches; the previous explicit
+	 * kfree() here was a double free.
+	 */
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	kfree(test_drv_data);
+	test_drv_data = (void *) -EPROBE_DEFER;
+
+err_exit:
+	return rc;
+}
+
+/* DT match table; the empty terminator ends the list. */
+static const struct of_device_id mmrm_vm_fe_test_dt_match[] = {
+	{.compatible = "qcom,mmrm-vm-fe-test"}, {} // empty
+};
+
+static struct platform_driver mmrm_vm_fe_test_driver = {
+	.probe = mmrm_vm_fe_test_probe,
+	.remove = mmrm_vm_fe_test_remove,
+	.driver = {
+			.name = MODULE_NAME,
+			.owner = THIS_MODULE,
+			.of_match_table = mmrm_vm_fe_test_dt_match,
+		},
+};
+
+/* Module init: register the platform driver. */
+static int __init mmrm_vm_fe_test_init(void)
+{
+	int rc = 0;
+	rc = platform_driver_register(&mmrm_vm_fe_test_driver);
+	if (rc) {
+		pr_info("%s: failed to register platform driver\n", __func__);
+	}
+	return rc;
+}
+module_init(mmrm_vm_fe_test_init);
+
+/* Module exit: unregister the platform driver. */
+static void __exit mmrm_vm_fe_test_exit(void)
+{
+	platform_driver_unregister(&mmrm_vm_fe_test_driver);
+}
+module_exit(mmrm_vm_fe_test_exit);
+
+MODULE_DESCRIPTION("MMRM VM FE TEST");
+MODULE_LICENSE("GPL v2");