mmrm: Add Multimedia Resource Manager (MMRM) driver

Add the Multimedia Resource Manager (MMRM) driver for clock-rate
admission control of multimedia clients.

Change-Id: I7109369b254793d1dceaf3a34002ad01d14bc40a
Shivendra Kakrania, 4 years ago
parent
commit
0b565216b2
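
For orientation, below is a minimal, hedged sketch of how a multimedia client might use the public API added in driver/src/msm_mmrm.c (mmrm_client_register(), mmrm_client_set_value(), mmrm_client_deregister()). The descriptor field names follow their use in this patch; the priority enumerator, the client_data contents and the client_id value are assumptions for illustration only, since <linux/soc/qcom/msm_mmrm.h> is not part of this change.

/* Illustrative client usage only; see hedging note above. */
#include <linux/clk.h>
#include <linux/printk.h>
#include <linux/soc/qcom/msm_mmrm.h>

static int example_mmrm_notifier(struct mmrm_client_notifier_data *nd)
{
	/* React to mitigation requests from MMRM, e.g. lower own clk rate. */
	return 0;
}

static int example_use_mmrm(struct clk *my_clk)
{
	struct mmrm_client *client;
	struct mmrm_client_data cdata = { 0 };	/* contents assumed */
	struct mmrm_client_desc desc = {
		.client_type = MMRM_CLIENT_CLOCK,
		.priority = MMRM_CLIENT_PRIOR_HIGH,	/* assumed enumerator */
		.pvt_data = NULL,
		.notifier_callback_fn = example_mmrm_notifier,
	};
	int rc;

	/* clk descriptor; client_id must match an mmrm-client-info DT entry */
	desc.client_info.desc.client_id = 0x1;	/* placeholder csid */
	desc.client_info.desc.clk = my_clk;
	strlcpy(desc.client_info.desc.name, "example_clk",
		sizeof(desc.client_info.desc.name));

	client = mmrm_client_register(&desc);
	if (!client)
		return -EINVAL;

	/* Request a rate; MMRM applies peak-current admission control. */
	rc = mmrm_client_set_value(client, &cdata, 200000000UL);
	if (rc)
		pr_err("mmrm: rate request rejected (%d)\n", rc);

	return mmrm_client_deregister(client);
}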

+ 10 - 0
Makefile

@@ -0,0 +1,10 @@
+# auto-detect subdirs
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+include $(srctree)/techpack/mmrm/config/waipiommrm.conf
+endif
+
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+LINUXINCLUDE    += -include $(srctree)/techpack/mmrm/config/waipiommrmconf.h
+endif
+
+obj-y += driver/

+ 6 - 0
config/waipiommrm.conf

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+export CONFIG_MSM_MMRM=y

+ 6 - 0
config/waipiommrmconf.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_MSM_MMRM 1

+ 10 - 0
driver/Makefile

@@ -0,0 +1,10 @@
+ccflags-y += -I$(srctree)/techpack/mmrm/driver/src/
+
+msm-mmrm-objs := src/msm_mmrm.o \
+				src/mmrm_internal.o \
+				src/mmrm_res_parse.o \
+				src/mmrm_debug.o \
+				src/mmrm_clk_rsrc_mgr_sw.o \
+				src/mmrm_clk_rsrc_mgr.o
+
+obj-$(CONFIG_MSM_MMRM) += msm-mmrm.o

+ 109 - 0
driver/src/mmrm_clk_rsrc_mgr.c

@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "mmrm_clk_rsrc_mgr.h"
+#include "mmrm_debug.h"
+
+/* sw clk mgr ops */
+static struct mmrm_clk_mgr_ops sw_clk_mgr_ops = {
+	.init_clk_mgr = mmrm_init_sw_clk_mgr,
+	.destroy_clk_mgr = mmrm_destroy_sw_clk_mgr,
+};
+
+int mmrm_get_clk_mgr_ops(void *driver_data)
+{
+	int rc = 0;
+	struct mmrm_driver_data *drv_data =
+		(struct mmrm_driver_data *)driver_data;
+
+	if (drv_data->clk_res.scheme == CLK_MGR_SCHEME_SW) {
+		drv_data->clk_mgr_ops = &sw_clk_mgr_ops;
+	} else if (drv_data->clk_res.scheme == CLK_MGR_SCHEME_CXIPEAK) {
+		d_mpr_e("%s: cxipeak is not supported with mmrm\n", __func__);
+		rc = -EINVAL;
+		goto err_exit;
+	} else {
+		d_mpr_e("%s: unsupported clk mgr scheme\n", __func__);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+struct mmrm_client *mmrm_clk_client_register(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client_desc *client_desc)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_reg) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return NULL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_reg(clk_mgr,
+			client_desc->client_info.desc,
+			client_desc->priority,
+			client_desc->pvt_data,
+			client_desc->notifier_callback_fn);
+}
+
+int mmrm_clk_client_deregister(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_dereg) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_dereg(clk_mgr, client);
+}
+
+
+int mmrm_clk_client_setval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	unsigned long val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_setval) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_setval(
+		clk_mgr, client, client_data, val);
+}
+
+int mmrm_clk_client_setval_inrange(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_setval_inrange) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_setval_inrange(
+		clk_mgr, client, client_data, val);
+}
+
+int mmrm_clk_client_getval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	if (!clk_mgr || !clk_mgr->clk_client_ops ||
+		!clk_mgr->clk_client_ops->clk_client_getval) {
+		d_mpr_e("%s: invalid clk mgr\n", __func__);
+		return -EINVAL;
+	}
+
+	return clk_mgr->clk_client_ops->clk_client_getval(
+		clk_mgr, client, val);
+}

+ 134 - 0
driver/src/mmrm_clk_rsrc_mgr.h

@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MMRM_CLK_RESOURCE_MGR_H_
+#define _MMRM_CLK_RESOURCE_MGR_H_
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+#include "mmrm_internal.h"
+
+enum mmrm_clk_mgr_scheme {
+	CLK_MGR_SCHEME_SW,
+	CLK_MGR_SCHEME_CXIPEAK
+};
+
+enum mmrm_sw_vdd_levels {
+	MMRM_VDD_LEVEL_SVS_L1,
+	MMRM_VDD_LEVEL_NOM,
+	MMRM_VDD_LEVEL_TURBO,
+	MMRM_VDD_LEVEL_MAX
+};
+
+static int mmrm_sw_vdd_corner[] = {
+	[MMRM_VDD_LEVEL_SVS_L1] = RPMH_REGULATOR_LEVEL_SVS_L1,
+	[MMRM_VDD_LEVEL_NOM] = RPMH_REGULATOR_LEVEL_NOM,
+	[MMRM_VDD_LEVEL_TURBO] = RPMH_REGULATOR_LEVEL_TURBO
+};
+
+#define MMRM_SW_CLIENTS_NUM_MAX 35
+
+typedef int (*notifier_callback_fn_t)(
+	struct mmrm_client_notifier_data *notifier_data);
+
+struct mmrm_sw_clk_client_tbl_entry {
+	char name[MMRM_CLK_CLIENT_NAME_SIZE];
+	struct clk *clk;
+	enum mmrm_client_priority pri;
+	void *pvt_data; /* client user data */
+	notifier_callback_fn_t notifier_cb_fn;
+
+	/* prepared internally */
+	u32 clk_src_id;
+	bool pass_through;
+	u32 min_level;
+	u32 max_level;
+	u64 freq[MMRM_VDD_LEVEL_MAX];
+	u32 dyn_pwr[MMRM_VDD_LEVEL_MAX];
+	u32 leak_pwr[MMRM_VDD_LEVEL_MAX];
+	u32 current_ma[MMRM_VDD_LEVEL_MAX];
+
+	/* reference to this entry */
+	struct mmrm_client *client;
+
+	/* configured clk rate */
+	u64 clk_rate;
+};
+
+struct mmrm_sw_peak_current_data {
+	u32 threshold;
+	u32 aggreg_val;
+};
+
+struct mmrm_sw_clk_mgr_info {
+	/* client data */
+	struct mmrm_sw_clk_client_tbl_entry *clk_client_tbl;
+	u32 tot_clk_clients;
+	u32 enabled_clk_clients;
+
+	/* peak current data */
+	struct mmrm_sw_peak_current_data peak_cur_data;
+};
+
+struct mmrm_clk_mgr {
+	struct mutex lock;
+	enum mmrm_clk_mgr_scheme scheme;
+	union {
+		struct mmrm_sw_clk_mgr_info sw_info;
+	} data;
+	struct mmrm_clk_mgr_client_ops *clk_client_ops;
+};
+
+struct mmrm_clk_mgr_client_ops {
+	/* client ops */
+	struct mmrm_client*(*clk_client_reg)(
+		struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_clk_client_desc clk_desc,
+		enum mmrm_client_priority priority, void *pvt_data,
+		notifier_callback_fn_t nt_fn_cb);
+	int (*clk_client_dereg)(
+		struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client);
+	int (*clk_client_setval)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data, unsigned long val);
+	int (*clk_client_setval_inrange)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data,
+		struct mmrm_client_res_value *val);
+	int (*clk_client_getval)(struct mmrm_clk_mgr *clk_mgr,
+		struct mmrm_client *client, struct mmrm_client_res_value *val);
+};
+
+/* clk mgr operations */
+struct mmrm_clk_mgr_ops {
+	int (*init_clk_mgr)(void *drv_data);
+	int (*destroy_clk_mgr)(struct mmrm_clk_mgr *sw_clk_mgr);
+};
+int mmrm_get_clk_mgr_ops(void *drv_data);
+
+/* clk mgr client operations */
+struct mmrm_client *mmrm_clk_client_register(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client_desc *client_desc);
+int mmrm_clk_client_deregister(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client);
+int mmrm_clk_client_setval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	unsigned long val);
+int mmrm_clk_client_setval_inrange(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val);
+int mmrm_clk_client_getval(struct mmrm_clk_mgr *clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val);
+
+/* sw clk mgr specific */
+int mmrm_init_sw_clk_mgr(void *driver_data);
+int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr);
+
+#endif //_MMRM_CLK_RESOURCE_MGR_H_

+ 47 - 0
driver/src/mmrm_clk_rsrc_mgr_cxipeak.c

@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "mmrm_clk_rsrc_mgr.h"
+
+struct mmrm_client *mmrm_cxipeak_clk_client_register(
+	struct mmrm_clk_client_desc clk_desc,
+	enum mmrm_client_priority priority, void *pvt_data,
+	notifier_callback_fn_t not_fn_cb)
+{
+	return NULL;
+}
+
+int mmrm_cxipeak_clk_client_deregister(struct mmrm_client *client)
+{
+	return 0;
+}
+
+int mmrm_cxipeak_clk_client_set_value(
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	return 0;
+}
+
+int mmrm_cxipeak_clk_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	return 0;
+}
+
+static struct mmrm_clk_mgr_client_ops clk_client_cxipeakops = {
+	.clk_client_reg = mmrm_cxipeak_clk_client_register,
+	.clk_client_dereg = mmrm_cxipeak_clk_client_deregister,
+	.clk_client_setval = mmrm_cxipeak_clk_client_set_value,
+	.clk_client_getval = mmrm_cxipeak_clk_client_get_value,
+};
+
+
+int mmrm_init_cxipeak_clk_mgr(void *driver_data)
+{
+	return 0;
+}
+
+int mmrm_destroy_cxipeak_clk_mgr(struct mmrm_clk_mgr *cxipeak_clk_mgr)
+{
+	return 0;
+}

+ 527 - 0
driver/src/mmrm_clk_rsrc_mgr_sw.c

@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/slab.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+#include <linux/clk.h>
+#include <linux/clk/qcom.h>
+
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+#include "mmrm_fixedpoint.h"
+
+#define Q16_INT(q) ((q) >> 16)
+#define Q16_FRAC(q) ((((q) & 0xFFFF) * 100) >> 16)
+
+static struct mmrm_client *mmrm_sw_clk_client_register(
+	struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_clk_client_desc clk_desc,
+	enum mmrm_client_priority priority,
+	void *pvt_data,
+	notifier_callback_fn_t not_fn_cb)
+{
+	int rc = 0;
+	struct mmrm_client *clk_client = NULL;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+
+	u32 c = 0;
+
+	mutex_lock(&sw_clk_mgr->lock);
+
+	/* check if entry is free in table */
+	if (sinfo->tot_clk_clients == sinfo->enabled_clk_clients) {
+		d_mpr_e("%s: no free entry to register a clk client\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_nofree_entry;
+	}
+
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		if (clk_desc.client_id == sinfo->clk_client_tbl[c].clk_src_id)
+			break;
+	}
+
+	if (c == sinfo->tot_clk_clients) {
+		d_mpr_e("%s: unknown clk client %d\n",
+			__func__, clk_desc.client_id);
+		rc = -EINVAL;
+		goto err_nofree_entry;
+	}
+
+	tbl_entry = &sinfo->clk_client_tbl[c];
+
+	if (tbl_entry->client) {
+		d_mpr_e("%s: client csid(%d) already registered\n",
+			__func__, tbl_entry->clk_src_id);
+		rc = -EINVAL;
+		goto err_already_registered;
+	}
+
+	/* populate the entry */
+	clk_client = kzalloc(sizeof(*clk_client), GFP_KERNEL);
+	if (!clk_client) {
+		d_mpr_e("%s: failed to allocate memory for clk_client\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_fail_alloc_clk_client;
+	}
+
+	clk_client->client_uid = c;
+	clk_client->client_type = MMRM_CLIENT_CLOCK;
+
+	/* copy the entries provided by client */
+	tbl_entry->client = clk_client;
+	strlcpy(tbl_entry->name, clk_desc.name, MMRM_CLK_CLIENT_NAME_SIZE);
+	tbl_entry->clk = clk_desc.clk;
+	tbl_entry->pri = priority;
+	tbl_entry->pvt_data = pvt_data;
+	tbl_entry->notifier_cb_fn = not_fn_cb;
+
+	/* print table entry */
+	d_mpr_e("%s: csid(%d) name(%s) pri(%d) pvt(%p) notifier(%p)\n",
+		__func__,
+		tbl_entry->clk_src_id,
+		tbl_entry->name,
+		tbl_entry->pri,
+		tbl_entry->pvt_data,
+		tbl_entry->notifier_cb_fn);
+
+	/* print power entries for the clk src */
+	d_mpr_e("%s: csid(%d) svsl1_cur(%d) nom_cur(%d) turbo_cur(%d)\n",
+		__func__,
+		tbl_entry->clk_src_id,
+		tbl_entry->current_ma[MMRM_VDD_LEVEL_SVS_L1],
+		tbl_entry->current_ma[MMRM_VDD_LEVEL_NOM],
+		tbl_entry->current_ma[MMRM_VDD_LEVEL_TURBO]);
+
+	mutex_unlock(&sw_clk_mgr->lock);
+
+	return clk_client;
+
+err_fail_alloc_clk_client:
+err_already_registered:
+err_nofree_entry:
+	mutex_unlock(&sw_clk_mgr->lock);
+	return NULL;
+}
+
+static int mmrm_sw_clk_client_deregister(struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_client *client)
+{
+	int rc =  0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* validate the client ptr */
+	if (!client || client->client_uid >= sinfo->tot_clk_clients) {
+		d_mpr_e("%s: invalid client uid (%d)\n",
+			__func__, client ? (int)client->client_uid : -1);
+		rc = -EINVAL;
+		goto err_not_valid_client;
+	}
+
+	mutex_lock(&sw_clk_mgr->lock);
+
+	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
+	kfree(tbl_entry->client);
+	tbl_entry->client = NULL;
+	tbl_entry->clk = NULL;
+	tbl_entry->pri = 0x0;
+	tbl_entry->pvt_data = NULL;
+	tbl_entry->notifier_cb_fn = NULL;
+
+	mutex_unlock(&sw_clk_mgr->lock);
+
+	return rc;
+
+err_not_valid_client:
+	return rc;
+}
+
+static int mmrm_sw_get_req_current(
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry,
+	unsigned long clk_val, u32 *req_current)
+{
+	int rc = 0;
+	u32 i;
+	int voltage_corner = mmrm_sw_vdd_corner[MMRM_VDD_LEVEL_NOM];
+
+	/* get voltage corner */
+	/* TBD: voltage_corner = qcom_clk_get_voltage(tbl_entry->clk, val); */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		if (voltage_corner == mmrm_sw_vdd_corner[i])
+			break;
+	}
+
+	if (i == MMRM_VDD_LEVEL_MAX) {
+		d_mpr_e("%s: csid(%d): invalid voltage corner(%d) for rate(%lu)\n",
+			__func__, tbl_entry->clk_src_id,
+			voltage_corner, clk_val);
+		rc = -EINVAL;
+		goto err_invalid_corner;
+	}
+
+	/* get current for the voltage corner */
+	*req_current = tbl_entry->current_ma[i];
+
+	return rc;
+
+err_invalid_corner:
+	return rc;
+}
+
+static int mmrm_sw_check_peak_current(
+	struct mmrm_sw_clk_mgr_info *sinfo,
+	u32 req_cur)
+{
+	int rc = 0;
+	struct mmrm_sw_peak_current_data *peak_data = &sinfo->peak_cur_data;
+
+	/* check for peak overshoot */
+	d_mpr_h("%s: entering\n", __func__);
+
+	if ((peak_data->aggreg_val + req_cur) >= peak_data->threshold) {
+		rc = -EINVAL;
+		/* TBD: return from here */
+	}
+
+	/* update peak current */
+	peak_data->aggreg_val += req_cur;
+
+	return rc;
+}
+
+static int mmrm_sw_clk_client_setval(struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	unsigned long val)
+{
+	int rc = 0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+	u32 req_cur;
+
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* validate input params */
+	if (!client || client->client_uid >= sinfo->tot_clk_clients) {
+		d_mpr_e("%s: invalid client uid (%d)\n",
+			__func__, client ? (int)client->client_uid : -1);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
+	if (!tbl_entry->clk) {
+		d_mpr_e("%s: clk src not registered\n", __func__);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	/* check if already configured */
+	if (tbl_entry->clk_rate == val) {
+		d_mpr_h("%s: csid(%d) same as previous clk rate %lu\n",
+			__func__, tbl_entry->clk_src_id, val);
+		goto exit_no_err;
+	}
+
+	/* get the required current val */
+	rc = mmrm_sw_get_req_current(tbl_entry, val, &req_cur);
+	if (rc || !req_cur) {
+		d_mpr_e("%s: csid(%d) unable to get req current\n",
+			__func__, tbl_entry->clk_src_id);
+		rc = -EINVAL;
+		goto err_invalid_clk_val;
+	}
+
+	mutex_lock(&sw_clk_mgr->lock);
+
+	/* check & update for peak current */
+	rc = mmrm_sw_check_peak_current(sinfo, req_cur);
+	if (rc) {
+		d_mpr_e("%s: csid (%d) peak overshoot req_cur(%d) peak_cur(%d)\n",
+			__func__, tbl_entry->clk_src_id, req_cur,
+			sinfo->peak_cur_data.aggreg_val);
+		/* TBD: unlock & check for mitigation */
+	}
+
+	/* update the current rate value */
+	tbl_entry->clk_rate = val;
+	mutex_unlock(&sw_clk_mgr->lock);
+
+	/* set clock rate */
+	d_mpr_e("%s: csid(%d) setting clk rate %lu\n", __func__,
+		tbl_entry->clk_src_id, val);
+	rc = clk_set_rate(tbl_entry->clk, val);
+	if (rc) {
+		d_mpr_e("%s: csid(%d) failed to set clock rate %lu\n",
+		__func__, tbl_entry->clk_src_id, val);
+		rc = -EINVAL;
+		/* TBD: incase of failure clk_rate is invalid */
+		goto err_clk_set_fail;
+	}
+
+exit_no_err:
+	return rc;
+
+err_invalid_clk_val:
+err_invalid_client:
+err_clk_set_fail:
+	return rc;
+}
+
+static int mmrm_sw_clk_client_setval_inrange(struct mmrm_clk_mgr *sw_clk_mgr,
+		struct mmrm_client *client,
+		struct mmrm_client_data *client_data,
+		struct mmrm_client_res_value *val)
+{
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* TBD: add support for set val in range */
+	return mmrm_sw_clk_client_setval(sw_clk_mgr, client, client_data,
+		val->cur);
+}
+
+static int mmrm_sw_clk_client_getval(struct mmrm_clk_mgr *sw_clk_mgr,
+	struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct mmrm_sw_clk_mgr_info *sinfo = &(sw_clk_mgr->data.sw_info);
+
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* validate input params */
+	if (!client || client->client_uid >= sinfo->tot_clk_clients) {
+		d_mpr_e("%s: invalid client uid (%d)\n",
+			__func__, client ? (int)client->client_uid : -1);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	tbl_entry = &sinfo->clk_client_tbl[client->client_uid];
+	if (!tbl_entry->clk) {
+		d_mpr_e("%s: clk src not registered\n", __func__);
+		rc = -EINVAL;
+		goto err_invalid_client;
+	}
+
+	/* return previously configured value */
+	/* TBD: Identify the min & max values */
+	val->min = tbl_entry->clk_rate;
+	val->cur = tbl_entry->clk_rate;
+	val->max = tbl_entry->clk_rate;
+
+	return rc;
+
+err_invalid_client:
+	return rc;
+}
+
+static struct mmrm_clk_mgr_client_ops clk_client_swops = {
+	.clk_client_reg = mmrm_sw_clk_client_register,
+	.clk_client_dereg = mmrm_sw_clk_client_deregister,
+	.clk_client_setval = mmrm_sw_clk_client_setval,
+	.clk_client_setval_inrange = mmrm_sw_clk_client_setval_inrange,
+	.clk_client_getval = mmrm_sw_clk_client_getval,
+};
+
+static int mmrm_sw_update_entries(struct mmrm_clk_platform_resources *cres,
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry)
+{
+	u32 i;
+	struct voltage_corner_set *cset = &cres->corner_set;
+	u32 scaling_factor = 0, voltage_factor = 0;
+	fp_t nom_dyn_pwr, nom_leak_pwr, freq_sc, dyn_sc, leak_sc,
+		volt, dyn_pwr, leak_pwr, pwr_mw;
+
+	nom_dyn_pwr = FP_INT(tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM]);
+	nom_leak_pwr = FP(Q16_INT(tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM]),
+		Q16_FRAC(tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM]), 100);
+
+	/* freq scaling only for svsl1, TBD: enhance with actual numbers */
+	freq_sc = FP(0, 86, 100);
+
+	/* update power & current entries for all levels */
+	for (i = 0; i < MMRM_VDD_LEVEL_MAX; i++) {
+		scaling_factor = cset->corner_tbl[i].scaling_factor_dyn;
+		dyn_sc = FP(
+			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);
+
+		scaling_factor = cset->corner_tbl[i].scaling_factor_leak;
+		leak_sc = FP(
+			Q16_INT(scaling_factor), Q16_FRAC(scaling_factor), 100);
+
+		voltage_factor = cset->corner_tbl[i].volt_factor;
+		volt = FP(
+			Q16_INT(voltage_factor), Q16_FRAC(voltage_factor), 100);
+
+		if (!i)
+			pwr_mw = fp_mult(nom_dyn_pwr, freq_sc);
+		else
+			pwr_mw = nom_dyn_pwr;
+
+		dyn_pwr = fp_mult(pwr_mw, dyn_sc);
+		leak_pwr = fp_mult(nom_leak_pwr, leak_sc);
+
+		tbl_entry->dyn_pwr[i] = fp_round(dyn_pwr);
+		tbl_entry->leak_pwr[i] = fp_round(leak_pwr);
+		tbl_entry->current_ma[i] =
+			fp_round(fp_div((dyn_pwr+leak_pwr), volt));
+		/*
+		d_mpr_e("%s: csid(%d) corner(%s) dyn_pwr(%zu) leak_pwr(%zu) tot_pwr(%d) cur_ma(%d)\n",
+			__func__,
+			tbl_entry->clk_src_id,
+			cset->corner_tbl[i].name,
+			tbl_entry->dyn_pwr[i],
+			tbl_entry->leak_pwr[i],
+			fp_round(dyn_pwr+leak_pwr),
+			tbl_entry->current_ma[i]);
+		*/
+	}
+
+	return 0;
+}
+
+static int mmrm_sw_prepare_table(struct mmrm_clk_platform_resources *cres,
+	struct mmrm_sw_clk_mgr_info *sinfo)
+{
+	int rc = 0;
+	u32 c;
+	struct mmrm_sw_clk_client_tbl_entry *tbl_entry;
+	struct nom_clk_src_info *nom_tbl_entry;
+
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* read all resource entries */
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		tbl_entry = &sinfo->clk_client_tbl[c];
+		nom_tbl_entry = &cres->nom_clk_set.clk_src_tbl[c];
+
+		tbl_entry->clk_src_id = (nom_tbl_entry->domain << 16 |
+			nom_tbl_entry->clk_src_id);
+		tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM] =
+			nom_tbl_entry->nom_dyn_pwr;
+		tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM] =
+			nom_tbl_entry->nom_leak_pwr;
+
+		//d_mpr_e("%s: updating csid(%d) dyn_pwr(%d) leak_pwr(%d)\n",
+		//	__func__, tbl_entry->clk_src_id,
+		//	tbl_entry->dyn_pwr[MMRM_VDD_LEVEL_NOM],
+		//	tbl_entry->leak_pwr[MMRM_VDD_LEVEL_NOM]);
+
+		/* calculate current & scale power for other levels */
+		rc = mmrm_sw_update_entries(cres, tbl_entry);
+		if (rc) {
+			d_mpr_e("%s: csid(%d) failed to prepare table\n",
+				__func__, tbl_entry->clk_src_id);
+		}
+	}
+
+	/* print the tables */
+	for (c = 0; c < sinfo->tot_clk_clients; c++) {
+		tbl_entry = &sinfo->clk_client_tbl[c];
+		d_mpr_e("%s: csid(%d) l1_cur_ma(%d) l2_cur_ma(%d) l3_cur_ma(%d)\n",
+			__func__, tbl_entry->clk_src_id,
+			tbl_entry->current_ma[MMRM_VDD_LEVEL_SVS_L1],
+			tbl_entry->current_ma[MMRM_VDD_LEVEL_NOM],
+			tbl_entry->current_ma[MMRM_VDD_LEVEL_TURBO]);
+	}
+
+	return rc;
+}
+
+int mmrm_init_sw_clk_mgr(void *driver_data)
+{
+	int rc = 0;
+	struct mmrm_driver_data *drv_data =
+		(struct mmrm_driver_data *)driver_data;
+	struct mmrm_clk_platform_resources *cres = &drv_data->clk_res;
+	struct mmrm_sw_clk_mgr_info *sinfo = NULL;
+	struct mmrm_clk_mgr *sw_clk_mgr = NULL;
+	u32 tbl_size = 0;
+
+	d_mpr_e("%s: entering\n", __func__);
+
+	/* mmrm_sw_clk_mgr */
+	sw_clk_mgr = kzalloc(sizeof(*sw_clk_mgr), GFP_KERNEL);
+	if (!sw_clk_mgr) {
+		d_mpr_e("%s: failed to allocate memory for sw_clk_mgr\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_fail_sw_clk_mgr;
+	}
+
+	/* initialize the tables */
+	tbl_size = sizeof(struct mmrm_sw_clk_client_tbl_entry) *
+		cres->nom_clk_set.count;
+
+	sinfo = &(sw_clk_mgr->data.sw_info);
+	sinfo->clk_client_tbl = kzalloc(tbl_size, GFP_KERNEL);
+	if (!sinfo->clk_client_tbl) {
+		d_mpr_e(
+			"%s: failed to allocate memory for clk_client_tbl (%d)\n",
+			__func__, cres->nom_clk_set.count);
+		rc = -ENOMEM;
+		goto err_fail_clk_tbl;
+	}
+	sinfo->tot_clk_clients = cres->nom_clk_set.count;
+	sinfo->enabled_clk_clients = 0;
+
+	/* prepare table entries */
+	rc = mmrm_sw_prepare_table(cres, sinfo);
+	if (rc) {
+		d_mpr_e(
+			"%s: failed to prepare clk table\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_fail_prep_tbl;
+	}
+
+	/* update the peak current threshold */
+	sinfo->peak_cur_data.threshold =
+		cres->threshold;
+	sinfo->peak_cur_data.aggreg_val = 0;
+
+	/* initialize mutex for sw clk mgr */
+	mutex_init(&sw_clk_mgr->lock);
+	sw_clk_mgr->scheme = drv_data->clk_res.scheme;
+
+	/* clk client operations */
+	sw_clk_mgr->clk_client_ops = &clk_client_swops;
+	drv_data->clk_mgr = sw_clk_mgr;
+
+	d_mpr_e("%s: exiting\n", __func__);
+
+	return rc;
+
+err_fail_prep_tbl:
+	kfree(sinfo->clk_client_tbl);
+err_fail_clk_tbl:
+	kfree(sw_clk_mgr);
+	drv_data->clk_mgr = NULL;
+err_fail_sw_clk_mgr:
+	d_mpr_e("%s: exiting with error %d\n", __func__, rc);
+	return rc;
+}
+
+int mmrm_destroy_sw_clk_mgr(struct mmrm_clk_mgr *sw_clk_mgr)
+{
+	int rc = 0;
+
+	if (!sw_clk_mgr) {
+		d_mpr_e("%s: sw_clk_mgr null\n", __func__);
+		return -EINVAL;
+	}
+
+	kfree(sw_clk_mgr->data.sw_info.clk_client_tbl);
+	mutex_destroy(&sw_clk_mgr->lock);
+	kfree(sw_clk_mgr);
+
+	return rc;
+}

+ 15 - 0
driver/src/mmrm_debug.c

@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include "mmrm_debug.h"
+
+struct dentry *msm_mmrm_debugfs_init(void)
+{
+	return NULL;
+}
+
+void msm_mmrm_debugfs_deinit(void)
+{
+}
+

+ 20 - 0
driver/src/mmrm_debug.h

@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MMRM_DEBUG__
+#define __MMRM_DEBUG__
+
+#include <linux/debugfs.h>
+#include <linux/printk.h>
+
+#ifndef MMRM_DBG_LABEL
+#define MMRM_DBG_LABEL "msm_mmrm: "
+#endif
+
+//#define d_mpr_h(__fmt, ...) pr_info(MMRM_DBG_LABEL __fmt, ##__VA_ARGS__)
+#define d_mpr_h(__fmt, ...) pr_err(MMRM_DBG_LABEL __fmt, ##__VA_ARGS__)
+#define d_mpr_e(__fmt, ...) pr_err(MMRM_DBG_LABEL __fmt, ##__VA_ARGS__)
+
+#endif

+ 68 - 0
driver/src/mmrm_fixedpoint.h

@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifdef _FIXP_ARITH_H
+#error "This implementation is meant to override fixp-arith.h, don't use both"
+#endif
+
+#ifndef _MMRM_FIXEDPOINT_H_
+#define _MMRM_FIXEDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+/*
+ * Normally would typedef'ed, but checkpatch doesn't like typedef.
+ * Also should be normally typedef'ed to intmax_t but that doesn't seem to be
+ * available in the kernel
+ */
+#define fp_t size_t
+
+/* (Arbitrarily) make the first 25% of the bits to be the fractional bits */
+#define FP_FRACTIONAL_BITS ((sizeof(fp_t) * 8) / 4)
+
+#define FP(__i, __f_n, __f_d) \
+	((((fp_t)(__i)) << FP_FRACTIONAL_BITS) + \
+	(((__f_n) << FP_FRACTIONAL_BITS) / (__f_d)))
+
+#define FP_INT(__i) FP(__i, 0, 1)
+#define FP_ONE FP_INT(1)
+#define FP_ZERO FP_INT(0)
+
+static inline size_t fp_frac_base(void)
+{
+	return GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_frac(fp_t a)
+{
+	return a & GENMASK(FP_FRACTIONAL_BITS - 1, 0);
+}
+
+static inline size_t fp_int(fp_t a)
+{
+	return a >> FP_FRACTIONAL_BITS;
+}
+
+static inline size_t fp_round(fp_t a)
+{
+	/* is the fractional part >= frac_max / 2? */
+	bool round_up = fp_frac(a) >= fp_frac_base() / 2;
+
+	return fp_int(a) + round_up;
+}
+
+static inline fp_t fp_mult(fp_t a, fp_t b)
+{
+	return (a * b) >> FP_FRACTIONAL_BITS;
+}
+
+
+static inline fp_t fp_div(fp_t a, fp_t b)
+{
+	return (a << FP_FRACTIONAL_BITS) / b;
+}
+
+#endif  /* _MMRM_FIXEDPOINT_H_ */
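
To make the Q-format above concrete (with sizeof(fp_t) == 8 the low 16 bits are fractional), here is a small hedged example of the kind of arithmetic mmrm_sw_update_entries() performs with these helpers; all numbers are placeholders chosen for illustration.

/* Worked example with made-up values: scale 100 mW of nominal dynamic
 * power by a 0.86 frequency factor and divide by a 0.75 voltage factor,
 * entirely in fixed point, then round back to an integer (~115).
 */
#include "mmrm_fixedpoint.h"

static inline u32 example_fp_current(void)
{
	fp_t pwr = FP_INT(100);			/* 100 (e.g. mW), Q-format */
	fp_t freq_sc = FP(0, 86, 100);		/* 0.86 */
	fp_t volt = FP(0, 75, 100);		/* 0.75 */
	fp_t scaled = fp_mult(pwr, freq_sc);	/* ~86 in Q-format */

	/* fp_div keeps the Q-format; fp_round drops back to an integer */
	return fp_round(fp_div(scaled, volt));	/* 86 / 0.75 ~= 115 */
}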

+ 97 - 0
driver/src/mmrm_internal.c

@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/of_platform.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+
+static struct mmrm_common_data waipio_common_data[] = {
+	{
+		.key = "qcom,mmrm_clk_threshold",
+		.value = 9,
+	},
+	{
+		.key = "qcom,mmrm_clk_mgr_scheme",
+		.value = CLK_MGR_SCHEME_SW,
+	},
+};
+
+static struct mmrm_platform_data waipio_data = {
+	.common_data = waipio_common_data,
+	.common_data_length = ARRAY_SIZE(waipio_common_data),
+};
+
+static const struct of_device_id mmrm_dt_match[] = {
+	{
+		.compatible = "qcom,waipio-mmrm",
+		.data = &waipio_data,
+	},
+	{},
+};
+
+struct mmrm_platform_data *mmrm_get_platform_data(struct device *dev)
+{
+	struct mmrm_platform_data *platform_data = NULL;
+	const struct of_device_id *match;
+
+	match = of_match_node(mmrm_dt_match, dev->of_node);
+	if (match)
+		platform_data = (struct mmrm_platform_data *)match->data;
+
+	if (!platform_data)
+		goto exit;
+
+	/* add additional config checks for platform data */
+
+exit:
+	return platform_data;
+}
+
+int mmrm_init(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	/* get clk resource mgr ops */
+	rc = mmrm_get_clk_mgr_ops(drv_data);
+	if (rc) {
+		d_mpr_e("%s: failed to get clk mgr ops\n", __func__);
+		goto err_get_clk_mgr_ops;
+	}
+
+	/* clock resource mgr */
+	rc = drv_data->clk_mgr_ops->init_clk_mgr(drv_data);
+	if (rc) {
+		d_mpr_e("%s: init clk mgr failed\n", __func__);
+		goto err_init_clk_mgr;
+	}
+
+	return rc;
+
+err_init_clk_mgr:
+err_get_clk_mgr_ops:
+	return rc;
+}
+
+int mmrm_deinit(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	if (!drv_data || !drv_data->clk_mgr_ops ||
+		!drv_data->clk_mgr_ops->destroy_clk_mgr) {
+		d_mpr_e("%s: invalid driver data or clk mgr ops\n", __func__);
+		return -EINVAL;
+	}
+
+	/* destroy clock resource mgr */
+	rc = drv_data->clk_mgr_ops->destroy_clk_mgr(drv_data->clk_mgr);
+	if (rc)
+		d_mpr_e("%s: destroy clk mgr failed\n", __func__);
+
+	drv_data->clk_mgr = NULL;
+
+	return rc;
+}

+ 47 - 0
driver/src/mmrm_internal.h

@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MMRM_INTERNAL_H_
+#define _MMRM_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include "mmrm_resources.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+struct mmrm_common_data {
+	char key[128];
+	int value;
+};
+
+struct mmrm_platform_data {
+	struct mmrm_common_data *common_data;
+	u32 common_data_length;
+	u32 scheme;
+};
+
+struct mmrm_driver_data {
+	/* platform data */
+	struct mmrm_platform_data *platform_data;
+
+	/* clk */
+	struct mmrm_clk_platform_resources clk_res;
+	struct mmrm_clk_mgr *clk_mgr;
+	struct mmrm_clk_mgr_ops *clk_mgr_ops;
+};
+
+struct mmrm_platform_data *mmrm_get_platform_data(struct device *dev);
+
+int mmrm_read_platform_resources(
+	struct platform_device *pdev,
+	struct mmrm_driver_data *drv_data);
+int mmrm_free_platform_resources(struct mmrm_driver_data *drv_data);
+
+int mmrm_init(struct mmrm_driver_data *drv_data);
+int mmrm_deinit(struct mmrm_driver_data *drv_data);
+
+#endif //_MMRM_INTERNAL_H_
+

+ 256 - 0
driver/src/mmrm_res_parse.c

@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/of_platform.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+
+static int mmrm_find_key_value(
+	struct mmrm_platform_data *pdata, const char *key)
+{
+	int i = 0;
+	struct mmrm_common_data *cdata = pdata->common_data;
+	int size = pdata->common_data_length;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(cdata[i].key, key))
+			return cdata[i].value;
+	}
+	return 0;
+}
+
+static int mmrm_read_clk_pltfrm_rsrc_frm_drv_data(
+	struct mmrm_driver_data *ddata)
+{
+	struct mmrm_platform_data *pdata;
+	struct mmrm_clk_platform_resources *cres;
+	int rc = 0;
+
+	pdata = ddata->platform_data;
+	cres = &ddata->clk_res;
+	cres->threshold = mmrm_find_key_value(pdata,
+						"qcom,mmrm_clk_threshold");
+	d_mpr_e("%s: configured mmrm clk threshold %d\n",
+		__func__, cres->threshold);
+
+	cres->scheme = mmrm_find_key_value(pdata,
+					"qcom,mmrm_clk_mgr_scheme");
+
+	return rc;
+}
+
+static void mmrm_free_rail_corner_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	cres->corner_set.corner_tbl = NULL;
+	cres->corner_set.count = 0;
+}
+
+static int mmrm_load_mm_rail_corner_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0, num_corners = 0, c = 0;
+	struct voltage_corner_set *corners = &cres->corner_set;
+	struct platform_device *pdev = cres->pdev;
+
+	num_corners = of_property_count_strings(pdev->dev.of_node,
+		"mm-rail-corners");
+	if (num_corners <= 0) {
+		d_mpr_e("%s: no mm rail corners found\n",
+			__func__);
+		corners->count = 0;
+		rc = -EINVAL;
+		goto err_load_corner_tbl;
+	}
+
+	corners->corner_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*corners->corner_tbl) * num_corners, GFP_KERNEL);
+	if (!corners->corner_tbl) {
+		d_mpr_e("%s: failed to allocate memory for corner_tbl\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_corner_tbl;
+	}
+	corners->count = num_corners;
+	d_mpr_h("%s: found %d corners\n",
+		__func__, num_corners);
+
+	for (c = 0; c < num_corners; c++) {
+		struct corner_info *ci = &corners->corner_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"mm-rail-corners", c, &ci->name);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mm-rail-fact-volt", c, &ci->volt_factor);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"scaling-fact-dyn", c, &ci->scaling_factor_dyn);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"scaling-fact-leak", c, &ci->scaling_factor_leak);
+	}
+
+	/* print corner tables */
+	for (c = 0; c < num_corners; c++) {
+		struct corner_info *ci = &corners->corner_tbl[c];
+
+		d_mpr_e(
+			"%s: corner_name:%s volt_factor: %d sc_dyn: %d sc_leak: %d\n",
+			__func__, ci->name, ci->volt_factor,
+			ci->scaling_factor_dyn, ci->scaling_factor_leak);
+	}
+
+	return 0;
+
+err_load_corner_tbl:
+	return rc;
+}
+
+static void mmrm_free_nom_clk_src_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	cres->nom_clk_set.clk_src_tbl = NULL;
+	cres->nom_clk_set.count = 0;
+}
+
+static int mmrm_load_nom_clk_src_table(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0, num_clk_src = 0, c = 0, size_clk_src = 0, entry_offset = 4;
+
+	struct platform_device *pdev = cres->pdev;
+	struct nom_clk_src_set *clk_srcs = &cres->nom_clk_set;
+
+	of_find_property(pdev->dev.of_node, "mmrm-client-info", &size_clk_src);
+	if ((size_clk_src < sizeof(*clk_srcs->clk_src_tbl)) ||
+		(size_clk_src % sizeof(*clk_srcs->clk_src_tbl))) {
+		d_mpr_e("%s: invalid size(%d) of clk src table\n",
+			__func__, size_clk_src);
+		clk_srcs->count = 0;
+		rc = -EINVAL;
+		goto err_load_clk_src_tbl;
+	}
+
+	clk_srcs->clk_src_tbl = devm_kzalloc(&pdev->dev,
+		size_clk_src, GFP_KERNEL);
+	if (!clk_srcs->clk_src_tbl) {
+		d_mpr_e("%s: failed to allocate memory for clk_src_tbl\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_load_clk_src_tbl;
+	}
+	num_clk_src = size_clk_src / sizeof(struct nom_clk_src_info);
+	clk_srcs->count = num_clk_src;
+	d_mpr_h("%s: found %d clk_srcs size %d\n",
+		__func__, num_clk_src, size_clk_src);
+	for (c = 0; c < num_clk_src; c++) {
+		struct nom_clk_src_info *ci = &clk_srcs->clk_src_tbl[c];
+
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset), &ci->domain);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+1), &ci->clk_src_id);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+2),
+			&ci->nom_dyn_pwr);
+		of_property_read_u32_index(pdev->dev.of_node,
+			"mmrm-client-info", (c*entry_offset+3),
+			&ci->nom_leak_pwr);
+	}
+
+	/* print the nom clk src table */
+	for (c = 0; c < num_clk_src; c++) {
+		struct nom_clk_src_info *ci = &clk_srcs->clk_src_tbl[c];
+
+		d_mpr_e("%s: domain: %d clk_src: %d dyn_pwr: %d leak_pwr: %d\n",
+			__func__, ci->domain, ci->clk_src_id, ci->nom_dyn_pwr,
+			ci->nom_leak_pwr);
+	}
+
+	return 0;
+
+err_load_clk_src_tbl:
+	return rc;
+}
+
+static int mmrm_read_clk_pltfrm_rsrc_frm_dt(
+	struct mmrm_clk_platform_resources *cres)
+{
+	int rc = 0;
+
+	rc = mmrm_load_mm_rail_corner_table(cres);
+	if (rc) {
+		d_mpr_e("%s: failed to load mm rail corner table\n",
+			__func__);
+		goto err_load_mmrm_rail_table;
+	}
+
+	if (cres->scheme == CLK_MGR_SCHEME_SW) {
+		rc = mmrm_load_nom_clk_src_table(cres);
+		if (rc) {
+			d_mpr_e("%s: failed to load nom clk src table\n",
+				__func__);
+			goto err_load_nom_clk_src_table;
+		}
+	} else if (cres->scheme == CLK_MGR_SCHEME_CXIPEAK) {
+		d_mpr_e("%s: cxipeak is not supported with mmrm\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_load_mmrm_rail_table;
+	}
+
+	return rc;
+
+err_load_nom_clk_src_table:
+	mmrm_free_nom_clk_src_table(cres);
+
+err_load_mmrm_rail_table:
+	mmrm_free_rail_corner_table(cres);
+	return rc;
+}
+
+int mmrm_read_platform_resources(struct platform_device *pdev,
+	struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	if (pdev->dev.of_node) {
+
+		/* clk resources */
+		drv_data->clk_res.pdev = pdev;
+
+		rc = mmrm_read_clk_pltfrm_rsrc_frm_drv_data(drv_data);
+		if (rc) {
+			d_mpr_e(
+				"%s: failed to read clk platform res from driver\n",
+				__func__);
+			goto exit;
+		}
+		rc = mmrm_read_clk_pltfrm_rsrc_frm_dt(&drv_data->clk_res);
+		if (rc) {
+			d_mpr_e("%s: failed to read clk platform res from dt\n",
+				__func__);
+			goto exit;
+		}
+	} else {
+		d_mpr_e("%s: of node is null\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+int mmrm_free_platform_resources(struct mmrm_driver_data *drv_data)
+{
+	int rc = 0;
+
+	/* free clk resources */
+	mmrm_free_nom_clk_src_table(&drv_data->clk_res);
+	mmrm_free_rail_corner_table(&drv_data->clk_res);
+
+	return rc;
+}
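
mmrm_load_nom_clk_src_table() above walks the mmrm-client-info property as a flat list of u32 cells, four per clock source (entry_offset = 4). As a hedged illustration of that layout, the parser effectively sees something like the following array; the values (and the mW / Q16 unit guesses) are placeholders, not real board data.

/* Illustrative only: in-memory view of the "mmrm-client-info" cells that
 * mmrm_load_nom_clk_src_table() reads four at a time per clock source.
 */
#include <linux/types.h>

static const u32 example_mmrm_client_info[] = {
	/* domain, clk_src_id, nom_dyn_pwr, nom_leak_pwr */
	0x0, 0x1, 100, 0x8000,	/* placeholder entry 0 */
	0x1, 0x2, 150, 0xC000,	/* placeholder entry 1 */
};
/* entry c maps to struct nom_clk_src_info via indices c*4 .. c*4+3 */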

+ 43 - 0
driver/src/mmrm_resources.h

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MMRM_RESOURCES_H_
+#define _MMRM_RESOURCES_H_
+
+#include <linux/platform_device.h>
+
+struct corner_info {
+	const char *name;
+	u32 volt_factor;
+	u32 scaling_factor_dyn;
+	u32 scaling_factor_leak;
+};
+
+struct voltage_corner_set {
+	struct corner_info *corner_tbl;
+	u32 count;
+};
+
+struct nom_clk_src_info {
+	u32 domain;
+	u32 clk_src_id;
+	u32 nom_dyn_pwr;
+	u32 nom_leak_pwr;
+};
+
+struct nom_clk_src_set {
+	struct nom_clk_src_info *clk_src_tbl;
+	u32 count;
+};
+
+struct mmrm_clk_platform_resources {
+	struct platform_device *pdev;
+	u32 threshold;
+	u32 scheme;
+	struct voltage_corner_set corner_set;
+	struct nom_clk_src_set nom_clk_set;
+};
+
+#endif

+ 317 - 0
driver/src/msm_mmrm.c

@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+
+#include "mmrm_internal.h"
+#include "mmrm_debug.h"
+#include "mmrm_clk_rsrc_mgr.h"
+
+#define	VERIFY_PDEV(pdev)	\
+{							\
+	if (!pdev) {			\
+		d_mpr_e("%s: null platform dev\n", __func__);\
+		rc = -EINVAL;		\
+		goto err_exit; \
+	}						\
+}
+
+#define RESET_DRV_DATA(drv_data)	\
+{									\
+	kfree(drv_data);				\
+	drv_data = (void *) -EPROBE_DEFER; \
+}
+
+
+struct mmrm_driver_data *drv_data = (void *) -EPROBE_DEFER;
+
+struct mmrm_client *mmrm_client_register(struct mmrm_client_desc *client_desc)
+{
+	struct mmrm_client *client = NULL;
+
+	d_mpr_h("%s: entering\n", __func__);
+
+	if (!client_desc) {
+		d_mpr_e("%s: null input descriptor\n", __func__);
+		goto err_exit;
+	}
+
+	if (client_desc->client_type == MMRM_CLIENT_CLOCK) {
+		client = mmrm_clk_client_register(
+					drv_data->clk_mgr, client_desc);
+		if (!client) {
+			d_mpr_e("%s: failed to register client\n", __func__);
+			goto err_exit;
+		}
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client_desc->client_type);
+		goto err_exit;
+	}
+
+	d_mpr_h("%s: exiting\n", __func__);
+	return client;
+
+err_exit:
+	d_mpr_h("%s: error exit\n", __func__);
+	return client;
+}
+
+int mmrm_client_deregister(struct mmrm_client *client)
+{
+	int rc = 0;
+
+	if (!client) {
+		d_mpr_e("%s: invalid input client\n", __func__);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		rc = mmrm_clk_client_deregister(drv_data->clk_mgr, client);
+		if (rc)
+			d_mpr_e("%s: failed to deregister client\n", __func__);
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+	return rc;
+err_exit:
+	d_mpr_h("%s: error exit\n", __func__);
+	return rc;
+}
+
+int mmrm_client_set_value(struct mmrm_client *client,
+	struct mmrm_client_data *client_data, unsigned long val)
+{
+	int rc = 0;
+
+	if (!client || !client_data) {
+		d_mpr_e("%s: invalid input client(%pK) client_data(%pK)\n",
+			__func__, client, client_data);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		rc = mmrm_clk_client_setval(drv_data->clk_mgr, client,
+				client_data, val);
+		if (rc)
+			d_mpr_e("%s: failed to set value for client\n", __func__);
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+int mmrm_client_set_value_inrange(struct mmrm_client *client,
+	struct mmrm_client_data *client_data,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+
+	if (!client || !client_data || !val) {
+		d_mpr_e(
+			"%s: invalid input client(%pK) client_data(%pK) val(%pK)\n",
+			__func__, client, client_data, val);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		rc = mmrm_clk_client_setval_inrange(drv_data->clk_mgr,
+				client, client_data, val);
+		if (rc)
+			d_mpr_e("%s: failed to set value in range\n", __func__);
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+
+	}
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+int mmrm_client_get_value(struct mmrm_client *client,
+	struct mmrm_client_res_value *val)
+{
+	int rc = 0;
+
+	if (!client || !val) {
+		d_mpr_e("%s: invalid input client(%pK) val(%pK)\n",
+			__func__, client, val);
+		rc = -EINVAL;
+		goto err_exit;
+	}
+
+	if (client->client_type == MMRM_CLIENT_CLOCK) {
+		rc = mmrm_clk_client_getval(drv_data->clk_mgr,
+				client, val);
+		if (rc)
+			d_mpr_e("%s: failed to get value for client\n", __func__);
+	} else {
+		d_mpr_e("%s: unknown client_type %d\n",
+			__func__, client->client_type);
+	}
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+static int msm_mmrm_probe_init(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	d_mpr_h("%s: entering\n", __func__);
+	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data) {
+		d_mpr_e("%s: unable to allocate memory for mmrm driver\n",
+			__func__);
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	drv_data->platform_data = mmrm_get_platform_data(&pdev->dev);
+	if (!drv_data->platform_data) {
+		d_mpr_e("%s: unable to get platform data\n",
+			__func__);
+		rc = -EINVAL;
+		goto err_get_drv_data;
+	}
+
+	dev_set_drvdata(&pdev->dev, drv_data);
+
+	rc = mmrm_read_platform_resources(pdev, drv_data);
+	if (rc) {
+		d_mpr_e("%s: unable to read platform resources for mmrm\n",
+			__func__);
+		goto err_read_pltfrm_rsc;
+	}
+
+	rc = mmrm_init(drv_data);
+	if (rc) {
+		d_mpr_e("%s: failed to init mmrm\n",
+			__func__);
+		goto err_mmrm_init;
+	}
+
+	d_mpr_h("%s: exiting with success\n", __func__);
+
+	return rc;
+
+err_mmrm_init:
+err_read_pltfrm_rsc:
+	mmrm_free_platform_resources(drv_data);
+err_get_drv_data:
+	RESET_DRV_DATA(drv_data);
+err_no_mem:
+	return rc;
+}
+
+static int msm_mmrm_probe(struct platform_device *pdev)
+{
+	int rc = -EINVAL;
+
+	d_mpr_h("%s: entering\n", __func__);
+
+	VERIFY_PDEV(pdev)
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-mmrm"))
+		return msm_mmrm_probe_init(pdev);
+
+	d_mpr_h("%s: exiting: no compatible device node\n", __func__);
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+static int msm_mmrm_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	VERIFY_PDEV(pdev);
+
+	drv_data = dev_get_drvdata(&pdev->dev);
+	if (!drv_data) {
+		d_mpr_e("%s: null driver data\n", __func__);
+		return -EINVAL;
+	}
+
+	mmrm_deinit(drv_data);
+	mmrm_free_platform_resources(drv_data);
+	dev_set_drvdata(&pdev->dev, NULL);
+	RESET_DRV_DATA(drv_data);
+
+	return rc;
+
+err_exit:
+	return rc;
+}
+
+static const struct of_device_id msm_mmrm_dt_match[] = {
+	{.compatible = "qcom,msm-mmrm"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_mmrm_dt_match);
+
+static struct platform_driver msm_mmrm_driver = {
+	.probe = msm_mmrm_probe,
+	.remove = msm_mmrm_remove,
+	.driver = {
+		.name = "msm-mmrm",
+		.of_match_table = msm_mmrm_dt_match,
+	},
+};
+
+static int __init msm_mmrm_init(void)
+{
+	int rc = 0;
+
+	d_mpr_h("%s: entering\n", __func__);
+
+	rc = platform_driver_register(&msm_mmrm_driver);
+	if (rc) {
+		d_mpr_e("%s: failed to register platform driver\n",
+			__func__);
+		goto err_platform_drv_reg;
+	}
+
+	d_mpr_h("%s: exiting\n", __func__);
+
+	return rc;
+
+err_platform_drv_reg:
+	return rc;
+}
+
+static void __exit msm_mmrm_exit(void)
+{
+	d_mpr_h("%s: entering\n", __func__);
+	platform_driver_unregister(&msm_mmrm_driver);
+	d_mpr_h("%s: exiting\n", __func__);
+}
+
+module_init(msm_mmrm_init);
+module_exit(msm_mmrm_exit);
+
+MODULE_DESCRIPTION("QTI MMRM Driver");
+MODULE_LICENSE("GPL v2");