
disp: msm: sde: add VM files for SDE

Add VM layer files to handle VM-specific operations
for the primary and secondary VMs. They interact with
the hypervisor RM modules to LEND/ACCEPT/RELEASE/RECLAIM
HW resources. They also handle the notifications for each
of the above Hyp RM operations to acquire and update
the SDE software state.

Change-Id: I5982f4bf56550dc464797c62cb356be39925b21c
Signed-off-by: Jeykumar Sankaran <[email protected]>
Jeykumar Sankaran, 5 years ago
parent
commit
720f9d0014
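
For orientation, a minimal sketch of how a KMS commit path could drive the hooks introduced below. Only the sde_vm_ops members and the sde_kms::vm_ops field come from this patch; the wrapper name, its call site and the error code are illustrative assumptions.

/* Illustrative wrapper; not part of this patch. */
static int sde_kms_commit_with_vm(struct sde_kms *sde_kms,
				  struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops = &sde_kms->vm_ops;
	int rc;

	/* skip HW programming while this VM does not own the hardware */
	if (vm_ops->vm_owns_hw && !vm_ops->vm_owns_hw(sde_kms))
		return -EACCES;

	/* first commit after ACCEPT: restore SW state before programming HW */
	if (vm_ops->vm_prepare_commit) {
		rc = vm_ops->vm_prepare_commit(sde_kms, state);
		if (rc)
			return rc;
	}

	/* ... program the hardware for this atomic state ... */

	/* last commit before RELEASE: prepare to hand the HW back */
	if (vm_ops->vm_post_commit)
		return vm_ops->vm_post_commit(sde_kms, state);

	return 0;
}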

+ 1 - 0
config/lahainadisp.conf

@@ -10,3 +10,4 @@ export CONFIG_DRM_MSM_REGISTER_LOGGING=y
 export CONFIG_QCOM_MDSS_PLL=y
 export CONFIG_DRM_SDE_RSC=y
 export CONFIG_DISPLAY_BUILD=y
+export CONFIG_DRM_SDE_VM=y

+ 1 - 1
config/lahainadispconf.h

@@ -15,4 +15,4 @@
 #define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
 #define CONFIG_QCOM_MDSS_PLL 1
 #define CONFIG_DRM_SDE_RSC 1
-
+#define CONFIG_DRM_SDE_VM 1

+ 5 - 1
msm/Makefile

@@ -81,7 +81,11 @@ msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \
 	sde/sde_hw_qdss.o \
 	sde_dsc_helper.o \
 	sde_vdc_helper.o \
-	sde/sde_hw_rc.o
+	sde/sde_hw_rc.o \
+
+msm_drm-$(CONFIG_DRM_SDE_VM) += sde/sde_vm_common.o \
+	sde/sde_vm_primary.o \
+	sde/sde_vm_trusted.o
 
 msm_drm-$(CONFIG_DEBUG_FS) += sde_dbg.o \
 	sde_dbg_evtlog.o \

+ 4 - 0
msm/sde/sde_kms.h

@@ -40,6 +40,7 @@
 #include "sde_power_handle.h"
 #include "sde_irq.h"
 #include "sde_core_perf.h"
+#include "sde_vm.h"
 
 #define DRMID(x) ((x) ? (x)->base.id : -1)
 
@@ -310,6 +311,9 @@ struct sde_kms {
 	cpumask_t irq_cpu_mask;
 	struct dev_pm_qos_request pm_qos_irq_req[NR_CPUS];
 	struct irq_affinity_notify affinity_notify;
+
+	struct sde_vm_ops vm_ops;
+	struct sde_vm *vm;
 };
 
 struct vsync_info {

+ 140 - 0
msm/sde/sde_vm.h

@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SDE_VM_H__
+#define __SDE_VM_H__
+
+#include "msm_drv.h"
+
+struct sde_kms;
+
+/**
+ * sde_vm_irq_entry - VM irq specification
+ * @label - VM_IRQ_LABEL assigned by Hyp RM
+ * @irq - linux mapped irq number
+ */
+struct sde_vm_irq_entry {
+	u32 label;
+	u32 irq;
+};
+
+/**
+ * sde_vm_irq_desc - list of IRQs to be handled
+ * @n_irq - irq count
+ * @irq_entries - list of sde_vm_irq_entry
+ */
+struct sde_vm_irq_desc {
+	u32 n_irq;
+	struct sde_vm_irq_entry *irq_entries;
+};
+
+/**
+ * sde_vm - VM layer descriptor. Abstracted for all the VMs
+ * @vm_res_lock - mutex to protect resource updates
+ * @mem_notification_cookie - Hyp RM notification identifier
+ * @n_irq_lent - irq count
+ * @io_mem_handle - RM identifier for the IO range
+ * @sde_kms - handle to sde_kms
+ */
+struct sde_vm {
+	struct mutex vm_res_lock;
+	void *mem_notification_cookie;
+	atomic_t n_irq_lent;
+	int io_mem_handle;
+	struct sde_kms *sde_kms;
+};
+
+/**
+ * sde_vm_primary - VM layer descriptor for Primary VM
+ * @base - parent struct object
+ * @irq_desc - cache copy of irq list for validating reclaim
+ */
+struct sde_vm_primary {
+	struct sde_vm base;
+	struct sde_vm_irq_desc *irq_desc;
+};
+
+/**
+ * sde_vm_trusted - VM layer descriptor for Trusted VM
+ * @base - parent struct object
+ * @sgl_desc - hyp RM sgl list descriptor for IO ranges
+ * @irq_desc - irq list
+ */
+struct sde_vm_trusted {
+	struct sde_vm base;
+	struct sde_vm_irq_desc *irq_desc;
+	struct hh_sgl_desc *sgl_desc;
+};
+
+/**
+ * sde_vm_ops - VM specific function hooks
+ */
+struct sde_vm_ops {
+	/**
+	 * vm_acquire - hook to handle HW accept
+	 * @kms - handle to sde_kms
+	 * @return - return 0 on success
+	 */
+	int (*vm_acquire)(struct sde_kms *kms);
+
+	/**
+	 * vm_release - hook to handle HW release
+	 * @kms - handle to sde_kms
+	 * @return - return 0 on success
+	 */
+	int (*vm_release)(struct sde_kms *kms);
+
+	/**
+	 * vm_owns_hw - hook to query the HW status of the VM
+	 * @kms - handle to sde_kms
+	 * @return - return true when vm owns the hw
+	 */
+	bool (*vm_owns_hw)(struct sde_kms *kms);
+
+	/**
+	 * vm_prepare_commit - hook to handle operations before the first
+	 *		       commit after acquiring the HW
+	 * @sde_kms - handle to sde_kms
+	 * @state - global atomic state to be parsed
+	 * @return - return 0 on success
+	 */
+	int (*vm_prepare_commit)(struct sde_kms *sde_kms,
+			struct drm_atomic_state *state);
+
+	/**
+	 * vm_post_commit - hook to handle operations after
+	 *		    last commit before release
+	 * @sde_kms - handle to sde_kms
+	 * @state - global atomic state to be parsed
+	 * @return - return 0 on success
+	 */
+	int (*vm_post_commit)(struct sde_kms *sde_kms,
+			struct drm_atomic_state *state);
+
+	/**
+	 * vm_deinit - deinitialize VM layer
+	 * @kms - pointer to sde_kms
+	 * @ops - VM specific function hooks to be cleared
+	 */
+	void (*vm_deinit)(struct sde_kms *kms, struct sde_vm_ops *ops);
+};
+
+/**
+ * sde_vm_primary_init - Initialize primary VM layer
+ * @kms - pointer to sde_kms
+ * @ops - primary VM specific ops functions
+ * @return - 0 on success
+ */
+int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops);
+
+/**
+ * sde_vm_trusted_init - Initialize Trusted VM layer
+ * @kms - pointer to sde_kms
+ * @ops - trusted VM specific ops functions
+ * @return - 0 on success
+ */
+int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops);
+
+#endif /* __SDE_VM_H__ */
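
A possible wiring of the two init entry points into sde_kms initialization, shown only as a sketch; the "qcom,sde-trusted-vm-env" device-tree property used to select the backend is an assumption and not part of this patch.

/* Illustrative only: the DT property that selects the backend is assumed. */
static int sde_kms_vm_init(struct sde_kms *sde_kms)
{
	struct device_node *np = sde_kms->dev->dev->of_node;
	int rc;

	if (of_property_read_bool(np, "qcom,sde-trusted-vm-env"))
		rc = sde_vm_trusted_init(sde_kms, &sde_kms->vm_ops);
	else
		rc = sde_vm_primary_init(sde_kms, &sde_kms->vm_ops);

	if (rc)
		SDE_ERROR("failed to initialize VM layer, rc = %d\n", rc);

	return rc;
}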

+ 213 - 0
msm/sde/sde_vm_common.c

@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/list_sort.h>
+#include "linux/sde_rsc.h"
+#include "dsi/dsi_display.h"
+#include "dp/dp_display.h"
+#include "sde_kms.h"
+#include "sde_vm_common.h"
+
+struct hh_notify_vmid_desc *sde_vm_populate_vmid(hh_vmid_t vmid)
+{
+	struct hh_notify_vmid_desc *vmid_desc;
+
+	vmid_desc = kzalloc(offsetof(struct hh_notify_vmid_desc,
+					vmid_entries[1]), GFP_KERNEL);
+	if (!vmid_desc)
+		return ERR_PTR(-ENOMEM);
+
+	vmid_desc->n_vmid_entries = 1;
+	vmid_desc->vmid_entries[0].vmid = vmid;
+
+	return vmid_desc;
+}
+
+struct hh_acl_desc *sde_vm_populate_acl(enum hh_vm_names vm_name)
+{
+	struct hh_acl_desc *acl_desc;
+	hh_vmid_t vmid;
+
+	hh_rm_get_vmid(vm_name, &vmid);
+
+	acl_desc = kzalloc(offsetof(struct hh_acl_desc, acl_entries[1]),
+			   GFP_KERNEL);
+	if (!acl_desc)
+		return ERR_PTR(-ENOMEM);
+
+	acl_desc->n_acl_entries = 1;
+	acl_desc->acl_entries[0].vmid = vmid;
+	acl_desc->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
+
+	return acl_desc;
+}
+
+int __mem_sort_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct msm_io_mem_entry *left =
+		container_of(a, struct msm_io_mem_entry, list);
+	struct msm_io_mem_entry *right =
+		container_of(b, struct msm_io_mem_entry, list);
+
+	return (left->base - right->base);
+}
+
+bool __merge_on_overlap(struct msm_io_mem_entry *res,
+		const struct msm_io_mem_entry *left,
+		const struct msm_io_mem_entry *right)
+{
+	phys_addr_t l_s = left->base;
+	phys_addr_t l_e = left->base + left->size;
+	phys_addr_t r_s = right->base;
+	phys_addr_t r_e = right->base + right->size;
+
+	memset(res, 0, sizeof(*res));
+
+	if (r_s <= l_e) {
+		res->base = min(l_s, r_s);
+		res->size = max(l_e, r_e) - res->base;
+
+		return true;
+	}
+
+	return false;
+}
+
+void _sde_vm_sort_and_align(struct list_head *mem)
+{
+	struct msm_io_mem_entry *entry, *tmp, *prev = NULL;
+	struct msm_io_mem_entry merged_entry;
+
+	list_for_each_entry(entry, mem, list) {
+		entry->base = ALIGN_DOWN(entry->base, PAGE_SIZE);
+		entry->size = ALIGN(entry->size, PAGE_SIZE);
+	}
+
+	list_sort(NULL, mem, __mem_sort_cmp);
+
+	list_for_each_entry_safe(entry, tmp, mem, list) {
+		if (prev && __merge_on_overlap(&merged_entry, prev, entry)) {
+			prev->base = merged_entry.base;
+			prev->size = merged_entry.size;
+
+			list_del(&entry->list);
+			entry = prev;
+		}
+		prev = entry;
+	}
+
+	list_for_each_entry(entry, mem, list)
+		SDE_DEBUG("base: 0x%x - size: 0x%x\n",
+				entry->base, entry->size);
+}
+
+struct hh_sgl_desc *sde_vm_populate_sgl(struct msm_io_res *io_res)
+{
+	struct hh_sgl_desc *sgl_desc;
+	struct msm_io_mem_entry *mem;
+	u32 i = 0, num_mem_entry = 0;
+
+	_sde_vm_sort_and_align(&io_res->mem);
+
+	list_for_each_entry(mem, &io_res->mem, list)
+		num_mem_entry++;
+
+	sgl_desc = kzalloc(offsetof(struct hh_sgl_desc,
+			   sgl_entries[num_mem_entry]), GFP_KERNEL);
+	if (!sgl_desc)
+		return ERR_PTR(-ENOMEM);
+
+	sgl_desc->n_sgl_entries = num_mem_entry;
+	list_for_each_entry(mem, &io_res->mem, list) {
+		sgl_desc->sgl_entries[i].ipa_base = mem->base;
+		sgl_desc->sgl_entries[i].size = mem->size;
+		i++;
+	}
+
+	msm_dss_clean_io_mem(&io_res->mem);
+
+	return sgl_desc;
+}
+
+struct sde_vm_irq_desc *sde_vm_populate_irq(struct msm_io_res *io_res)
+{
+	struct msm_io_irq_entry *irq;
+	u32 i = 0, num_irq = 0;
+	struct sde_vm_irq_desc *irq_desc;
+
+	list_for_each_entry(irq, &io_res->irq, list)
+		num_irq++;
+
+	irq_desc = kzalloc(sizeof(*irq_desc), GFP_KERNEL);
+	if (!irq_desc)
+		return ERR_PTR(-ENOMEM);
+
+	irq_desc->irq_entries = kcalloc(num_irq,
+					sizeof(struct sde_vm_irq_entry),
+					GFP_KERNEL);
+	if (!irq_desc->irq_entries) {
+		sde_vm_free_irq(irq_desc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	list_for_each_entry(irq, &io_res->irq, list) {
+		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
+
+		entry->irq = irq->irq_num;
+		entry->label = irq->label;
+		i++;
+	}
+
+	irq_desc->n_irq = num_irq;
+
+	msm_dss_clean_io_irq(&io_res->irq);
+
+	return irq_desc;
+}
+
+void sde_vm_free_irq(struct sde_vm_irq_desc *irq_desc)
+{
+	if (irq_desc && irq_desc->irq_entries)
+		kfree(irq_desc->irq_entries);
+
+	kfree(irq_desc);
+}
+
+int sde_vm_get_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
+{
+	struct msm_drm_private *priv = sde_kms->dev->dev_private;
+	struct msm_vm_client_entry *entry;
+	int rc = 0;
+
+	rc = sde_kms_get_io_resources(sde_kms, io_res);
+	if (rc)
+		goto fail_get_res;
+
+	list_for_each_entry(entry, &priv->vm_client_list, list) {
+		if (!entry->ops.vm_get_io_resources)
+			continue;
+
+		rc = entry->ops.vm_get_io_resources(io_res, entry->data);
+		if (rc) {
+			SDE_ERROR("get_io_resources failed for device: %d\n",
+					 entry->dev->id);
+			goto fail_get_res;
+		}
+	}
+
+	return rc;
+
+fail_get_res:
+	msm_dss_clean_io_mem(&io_res->mem);
+	msm_dss_clean_io_irq(&io_res->irq);
+
+	return rc;
+}
+
+void sde_vm_free_resources(struct msm_io_res *io_res)
+{
+	msm_dss_clean_io_mem(&io_res->mem);
+	msm_dss_clean_io_irq(&io_res->irq);
+}
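
A worked illustration of the page-align/sort/merge pass done by _sde_vm_sort_and_align() above, assuming 4K pages; the addresses are made up for the example and are not taken from the patch.

/*
 * Input list (unsorted, unaligned):
 *   { base = 0x0ae01800, size = 0x0400 }
 *   { base = 0x0ae00000, size = 0x1800 }
 *   { base = 0x0af00000, size = 0x2000 }
 *
 * After ALIGN_DOWN(base, PAGE_SIZE) / ALIGN(size, PAGE_SIZE) and list_sort():
 *   { 0x0ae00000, 0x2000 }, { 0x0ae01000, 0x1000 }, { 0x0af00000, 0x2000 }
 *
 * After merging overlapping/adjacent ranges with __merge_on_overlap():
 *   { 0x0ae00000, 0x2000 }, { 0x0af00000, 0x2000 }
 */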

+ 66 - 0
msm/sde/sde_vm_common.h

@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SDE_VM_COMMON_H__
+#define __SDE_VM_COMMON_H__
+
+#include <linux/haven/hh_rm_drv.h>
+#include "sde_vm.h"
+
+#define SDE_VM_MEM_LABEL 0x11
+
+/**
+ * sde_vm_populate_vmid - create and populate the rm vmid desc structure with
+ *			  the given vmid
+ * @vmid: vmid of the destination vm
+ * @return: populated hh_notify_vmid_desc structure
+ */
+struct hh_notify_vmid_desc *sde_vm_populate_vmid(hh_vmid_t vmid);
+
+/**
+ * sde_vm_populate_acl - create and populate the access control list structure
+ *			 for the given vm name
+ * @vm_name: vm name enum published by the RM driver
+ * @return: populated hh_acl_desc structure
+ */
+struct hh_acl_desc *sde_vm_populate_acl(enum hh_vm_names vm_name);
+
+/**
+ * sde_vm_populate_sgl - create and populate the scatter/gather list structure
+ *			 with the given io memory list
+ * @io_res: io resource list containing the io memory
+ * @return: populated hh_sgl_desc structure
+ */
+struct hh_sgl_desc *sde_vm_populate_sgl(struct msm_io_res *io_res);
+
+/**
+ * sde_vm_populate_irq - create and populate the hw irq descriptor structure
+ *			 with the given hw irq lines
+ * @io_res: io resource list containing the irq numbers
+ * @return: populated sde_vm_irq_desc structure
+ */
+struct sde_vm_irq_desc *sde_vm_populate_irq(struct msm_io_res *io_res);
+
+/**
+ * sde_vm_free_irq - free up the irq description structure
+ * @irq_desc: handle to irq descriptor
+ */
+void sde_vm_free_irq(struct sde_vm_irq_desc *irq_desc);
+
+/**
+ * sde_vm_get_resources - collect io resource from all the VM clients
+ * @sde_kms: handle to sde_kms
+ * @io_res: pointer to msm_io_res structure to populate the resources
+ * @return: 0 on success.
+ */
+int sde_vm_get_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res);
+
+/**
+ * sde_vm_free_resources - free up the io resource list
+ * @io_res: pointer to msm_io_res structure
+ */
+void sde_vm_free_resources(struct msm_io_res *io_res);
+
+#endif /* __SDE_VM_COMMON_H__ */
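
A condensed sketch of the typical pipeline through these helpers when a backend builds the HW description it will lend or validate; the wrapper function and simplified error codes are illustrative, only the helper calls come from this patch.

/* Illustrative only: condensed from the backend lend/validate paths. */
static int sde_vm_collect_hw_description(struct sde_kms *sde_kms,
					 struct hh_sgl_desc **sgl,
					 struct sde_vm_irq_desc **irqs)
{
	struct msm_io_res io_res;
	int rc;

	INIT_LIST_HEAD(&io_res.mem);
	INIT_LIST_HEAD(&io_res.irq);

	/* gather IO memory ranges and IRQs from sde_kms and all VM clients */
	rc = sde_vm_get_resources(sde_kms, &io_res);
	if (rc)
		return rc;

	/* page-align, sort and merge the IO ranges into an RM sgl list */
	*sgl = sde_vm_populate_sgl(&io_res);
	if (IS_ERR_OR_NULL(*sgl)) {
		sde_vm_free_resources(&io_res);
		return -ENOMEM;
	}

	/* translate the linux IRQ list into RM label/irq pairs */
	*irqs = sde_vm_populate_irq(&io_res);
	if (IS_ERR_OR_NULL(*irqs)) {
		kfree(*sgl);
		sde_vm_free_resources(&io_res);
		return -ENOMEM;
	}

	return 0;
}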

+ 294 - 0
msm/sde/sde_vm_primary.c

@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/notifier.h>
+#include <linux/haven/hh_rm_drv.h>
+#include <linux/haven/hh_irq_lend.h>
+#include <linux/haven/hh_mem_notifier.h>
+#include "sde_kms.h"
+#include "sde_vm.h"
+#include "sde_vm_common.h"
+
+#define to_vm_primary(vm) ((struct sde_vm_primary *)vm)
+
+static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
+{
+	struct sde_vm_primary *sde_vm;
+	bool owns_irq, owns_mem_io;
+
+	sde_vm = to_vm_primary(sde_kms->vm);
+
+	owns_irq = !atomic_read(&sde_vm->base.n_irq_lent);
+	owns_mem_io = (sde_vm->base.io_mem_handle < 0);
+
+	return (owns_irq && owns_mem_io);
+}
+
+void sde_vm_irq_release_notification_handler(void *req, enum hh_irq_label label)
+{
+	struct sde_vm_primary *sde_vm;
+	int rc = 0;
+
+	if (!req) {
+		SDE_ERROR("invalid data on release notification\n");
+		return;
+	}
+
+	sde_vm = to_vm_primary(req);
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	rc = hh_irq_reclaim(label);
+	if (rc) {
+		SDE_ERROR("failed to reclaim irq label: %d\n", label);
+		goto notify_end;
+	}
+
+	/*
+	 * Skip per-IRQ label verification since IRQs are MDSS centric.
+	 * Additional verification needs to be enabled when per-display
+	 * IRQs are supported.
+	 */
+	atomic_dec(&sde_vm->base.n_irq_lent);
+
+	SDE_INFO("irq reclaim succeeded for label: %d\n", label);
+notify_end:
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+
+}
+
+static void sde_vm_mem_release_notification_handler(
+		enum hh_mem_notifier_tag tag, unsigned long notif_type,
+		void *entry_data, void *notif_msg)
+{
+	struct hh_rm_notif_mem_released_payload *payload;
+	struct sde_vm_primary *sde_vm;
+	struct sde_kms *sde_kms;
+	int rc = 0;
+
+	if (notif_type != HH_RM_NOTIF_MEM_RELEASED ||
+			tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
+		return;
+
+	if (!entry_data || !notif_msg)
+		return;
+
+	payload = (struct hh_rm_notif_mem_released_payload *)notif_msg;
+	sde_vm = (struct sde_vm_primary *)entry_data;
+	sde_kms = sde_vm->base.sde_kms;
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	if (payload->mem_handle != sde_vm->base.io_mem_handle)
+		goto notify_end;
+
+	rc = hh_rm_mem_reclaim(payload->mem_handle, 0);
+	if (rc) {
+		SDE_ERROR("failed to reclaim IO memory, rc=%d\n", rc);
+		goto notify_end;
+	}
+
+	sde_vm->base.io_mem_handle = -1;
+
+	SDE_INFO("mem reclaim succeeded for tag: %d\n", tag);
+notify_end:
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+}
+
+static int _sde_vm_lend_notify_registers(struct sde_vm *vm,
+					 struct msm_io_res *io_res)
+{
+	struct sde_vm_primary *sde_vm;
+	struct hh_acl_desc *acl_desc;
+	struct hh_sgl_desc *sgl_desc;
+	struct hh_notify_vmid_desc *vmid_desc;
+	hh_memparcel_handle_t mem_handle;
+	hh_vmid_t trusted_vmid;
+	int rc = 0;
+
+	sde_vm = to_vm_primary(vm);
+
+	acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
+	if (IS_ERR(acl_desc)) {
+		SDE_ERROR("failed to populate acl descriptor, rc = %ld\n",
+			   PTR_ERR(acl_desc));
+		return PTR_ERR(acl_desc);
+	}
+
+	sgl_desc = sde_vm_populate_sgl(io_res);
+	if (IS_ERR_OR_NULL(sgl_desc)) {
+		SDE_ERROR("failed to populate sgl descriptor, rc = %ld\n",
+			   PTR_ERR(sgl_desc));
+		rc = sgl_desc ? PTR_ERR(sgl_desc) : -ENOMEM;
+		goto sgl_fail;
+	}
+
+	rc = hh_rm_mem_lend(HH_RM_MEM_TYPE_IO, 0, SDE_VM_MEM_LABEL,
+				 acl_desc, sgl_desc, NULL, &mem_handle);
+	if (rc) {
+		SDE_ERROR("hyp lend failed with error, rc: %d\n", rc);
+		goto fail;
+	}
+
+	hh_rm_get_vmid(HH_TRUSTED_VM, &trusted_vmid);
+
+	vmid_desc = sde_vm_populate_vmid(trusted_vmid);
+	if (IS_ERR(vmid_desc)) {
+		rc = PTR_ERR(vmid_desc);
+		goto fail;
+	}
+
+	rc = hh_rm_mem_notify(mem_handle, HH_RM_MEM_NOTIFY_RECIPIENT,
+				  HH_MEM_NOTIFIER_TAG_DISPLAY, vmid_desc);
+	if (rc) {
+		SDE_ERROR("hyp mem notify failed, rc = %d\n", rc);
+		goto notify_fail;
+	}
+
+	sde_vm->base.io_mem_handle = mem_handle;
+
+	SDE_INFO("IO memory lend succeeded for tag: %d\n",
+			HH_MEM_NOTIFIER_TAG_DISPLAY);
+
+notify_fail:
+	kfree(vmid_desc);
+fail:
+	kfree(sgl_desc);
+sgl_fail:
+	kfree(acl_desc);
+
+	return rc;
+}
+
+static int _sde_vm_lend_irq(struct sde_vm *vm, struct msm_io_res *io_res)
+{
+	struct sde_vm_primary *sde_vm;
+	struct sde_vm_irq_desc *irq_desc;
+	int i, rc = 0;
+
+	sde_vm = to_vm_primary(vm);
+
+	irq_desc = sde_vm_populate_irq(io_res);
+	if (IS_ERR(irq_desc)) {
+		SDE_ERROR("failed to populate irq descriptor, rc = %ld\n",
+			  PTR_ERR(irq_desc));
+		return PTR_ERR(irq_desc);
+	}
+
+	for (i = 0; i < irq_desc->n_irq; i++) {
+		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
+
+		rc = hh_irq_lend(entry->label, HH_TRUSTED_VM, entry->irq,
+				 sde_vm_irq_release_notification_handler,
+				 sde_vm);
+		if (rc) {
+			SDE_ERROR("irq lend failed for irq label: %d, rc=%d\n",
+				  entry->label, rc);
+			hh_irq_reclaim(entry->label);
+			return rc;
+		}
+
+		SDE_INFO("vm lend succeeded for IRQ label: %d\n", entry->label);
+	}
+
+	// cache the irq list for validation during release
+	sde_vm->irq_desc = irq_desc;
+	atomic_set(&sde_vm->base.n_irq_lent, sde_vm->irq_desc->n_irq);
+
+	return rc;
+}
+
+static int _sde_vm_release(struct sde_kms *kms)
+{
+	struct msm_io_res io_res;
+	struct sde_vm_primary *sde_vm;
+	int rc = 0;
+
+	if (!kms->vm)
+		return 0;
+
+	sde_vm = to_vm_primary(kms->vm);
+
+	INIT_LIST_HEAD(&io_res.mem);
+	INIT_LIST_HEAD(&io_res.irq);
+
+	rc = sde_vm_get_resources(kms, &io_res);
+	if (rc) {
+		SDE_ERROR("failed to get resources\n");
+		return rc;
+	}
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	rc = _sde_vm_lend_notify_registers(kms->vm, &io_res);
+	if (rc) {
+		SDE_ERROR("failed to lend and notify register resources\n");
+		goto assign_fail;
+	}
+
+	rc = _sde_vm_lend_irq(kms->vm, &io_res);
+	if (rc) {
+		SDE_ERROR("failed to lend IRQs\n");
+		goto assign_fail;
+	}
+assign_fail:
+	sde_vm_free_resources(&io_res);
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+
+	return rc;
+}
+
+static void _sde_vm_deinit(struct sde_kms *sde_kms, struct sde_vm_ops *ops)
+{
+	struct sde_vm_primary *sde_vm;
+
+	if (!sde_kms->vm)
+		return;
+
+	memset(ops, 0, sizeof(*ops));
+
+	sde_vm = to_vm_primary(sde_kms->vm);
+
+	if (sde_vm->base.mem_notification_cookie)
+		hh_mem_notifier_unregister(
+				sde_vm->base.mem_notification_cookie);
+
+	if (sde_vm->irq_desc)
+		sde_vm_free_irq(sde_vm->irq_desc);
+
+	kfree(sde_vm);
+}
+
+static void _sde_vm_set_ops(struct sde_vm_ops *ops)
+{
+	memset(ops, 0, sizeof(*ops));
+
+	ops->vm_release = _sde_vm_release;
+	ops->vm_owns_hw = sde_vm_owns_hw;
+	ops->vm_deinit = _sde_vm_deinit;
+}
+
+int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops)
+{
+	struct sde_vm_primary *sde_vm;
+	void *cookie;
+	int rc = 0;
+
+	sde_vm = kzalloc(sizeof(*sde_vm), GFP_KERNEL);
+	if (!sde_vm)
+		return -ENOMEM;
+
+	_sde_vm_set_ops(ops);
+
+	cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_DISPLAY,
+			       sde_vm_mem_release_notification_handler, sde_vm);
+	if (!cookie) {
+		SDE_ERROR("failed to register RM mem release notifier\n");
+		rc = -EINVAL;
+		goto init_fail;
+	}
+
+	sde_vm->base.mem_notification_cookie = cookie;
+	sde_vm->base.sde_kms = kms;
+	sde_vm->base.io_mem_handle = -1; // 0 is a valid handle
+	kms->vm = &sde_vm->base;
+
+	mutex_init(&sde_vm->base.vm_res_lock);
+
+	return 0;
+init_fail:
+	_sde_vm_deinit(kms, ops);
+
+	return rc;
+}

+ 394 - 0
msm/sde/sde_vm_trusted.c

@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/haven/hh_rm_drv.h>
+#include <linux/haven/hh_irq_lend.h>
+#include <linux/haven/hh_mem_notifier.h>
+#include <linux/sort.h>
+#include <linux/bsearch.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include "sde_kms.h"
+#include "sde_vm_common.h"
+#include "sde_vm.h"
+
+#define to_vm_trusted(vm) ((struct sde_vm_trusted *)vm)
+
+static int __sgl_cmp(const void *a, const void *b)
+{
+	struct hh_sgl_entry *l = (struct hh_sgl_entry *)a;
+	struct hh_sgl_entry *r = (struct hh_sgl_entry *)b;
+
+	return  (l->ipa_base - r->ipa_base);
+}
+
+int _sde_vm_validate_sgl(struct hh_sgl_desc *expected,
+			 struct hh_sgl_desc *assigned)
+{
+	u32 idx;
+
+	/*
+	 * fragmented address spaces are not supported.
+	 * So the number of sgl entries is expected to be the same.
+	 */
+	if (expected->n_sgl_entries != assigned->n_sgl_entries)
+		return -E2BIG;
+
+	sort(assigned->sgl_entries, assigned->n_sgl_entries,
+			sizeof(assigned->sgl_entries[0]), __sgl_cmp, NULL);
+
+	for (idx = 0; idx < expected->n_sgl_entries; idx++) {
+		struct hh_sgl_entry *e = &expected->sgl_entries[idx];
+		struct hh_sgl_entry *a = &assigned->sgl_entries[idx];
+
+		if ((e->ipa_base != a->ipa_base) || (e->size != a->size)) {
+			SDE_DEBUG("sgl mismatch: (%llx - %llx) vs (%llx - %llx)\n",
+				   e->ipa_base, e->size, a->ipa_base, a->size);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __irq_cmp(const void *a, const void *b)
+{
+	struct sde_vm_irq_entry *l = (struct sde_vm_irq_entry *)a;
+	struct sde_vm_irq_entry *r = (struct sde_vm_irq_entry *)b;
+
+	return  (l->label - r->label);
+}
+
+void sde_vm_irq_lend_notification_handler(void *req, enum hh_irq_label label)
+{
+	struct sde_vm_trusted *sde_vm;
+	struct sde_kms *sde_kms;
+	struct sde_vm_irq_desc *irq_desc;
+	struct sde_vm_irq_entry irq_temp, *found = NULL;
+	struct irq_data *exp_irq_data, *acc_irq_data;
+	int accepted_irq, expected_irq;
+
+	if (!req) {
+		SDE_ERROR("invalid data on lend notification\n");
+		return;
+	}
+
+	sde_vm = to_vm_trusted(req);
+	sde_kms = sde_vm->base.sde_kms;
+	irq_desc = sde_vm->irq_desc;
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	memset(&irq_temp, 0, sizeof(irq_temp));
+
+	irq_temp.label = label;
+	found = bsearch((void *)&irq_temp, (void *)irq_desc->irq_entries,
+			irq_desc->n_irq, sizeof(struct sde_vm_irq_entry),
+			__irq_cmp);
+	if (!found) {
+		SDE_ERROR("irq mismatch for label: %d irq: %d\n",
+			   irq_temp.label, irq_temp.irq);
+		goto end;
+	}
+
+	expected_irq = found->irq;
+	accepted_irq = hh_irq_accept(label, -1, IRQ_TYPE_LEVEL_HIGH);
+	if (accepted_irq < 0) {
+		SDE_ERROR("failed to accept irq for label: %d\n", label);
+		goto end;
+	}
+
+	exp_irq_data = irq_get_irq_data(expected_irq);
+	if (!exp_irq_data) {
+		SDE_ERROR("failed to get irq data for irq: %d\n", expected_irq);
+		goto end;
+	}
+
+	acc_irq_data = irq_get_irq_data(accepted_irq);
+	if (!acc_irq_data) {
+		SDE_ERROR("failed to get irq data for irq: %d\n", accepted_irq);
+		goto end;
+	}
+
+	if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
+		SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n", label);
+		goto end;
+	}
+
+	SDE_INFO("IRQ accept succeeded for label %d irq: %lu\n", label,
+			exp_irq_data->hwirq);
+
+	atomic_inc(&sde_vm->base.n_irq_lent);
+end:
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+}
+
+static void sde_vm_mem_lend_notification_handler(enum hh_mem_notifier_tag tag,
+					       unsigned long notif_type,
+					void *entry_data, void *notif_msg)
+{
+	struct hh_rm_notif_mem_shared_payload *payload;
+	struct hh_sgl_desc *sgl_desc;
+	struct hh_acl_desc *acl_desc;
+	struct sde_kms *sde_kms;
+	struct sde_vm_trusted *sde_vm;
+	int rc = 0;
+
+	if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
+			tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
+		return;
+
+	if (!entry_data || !notif_msg)
+		return;
+
+	payload = (struct hh_rm_notif_mem_shared_payload *)notif_msg;
+
+	if (payload->trans_type != HH_RM_TRANS_TYPE_LEND ||
+	    payload->label != SDE_VM_MEM_LABEL)
+		return;
+
+	sde_vm = (struct sde_vm_trusted *)entry_data;
+	sde_kms = sde_vm->base.sde_kms;
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
+	if (IS_ERR(acl_desc)) {
+		SDE_ERROR("failed to populate acl data, rc=%ld\n",
+			   PTR_ERR(acl_desc));
+		goto acl_fail;
+	}
+
+	sgl_desc = hh_rm_mem_accept(payload->mem_handle, HH_RM_MEM_TYPE_IO,
+				    HH_RM_TRANS_TYPE_LEND,
+				    HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS|
+				    HH_RM_MEM_ACCEPT_VALIDATE_LABEL|
+				    HH_RM_MEM_ACCEPT_DONE,
+				    payload->label,
+				    acl_desc, NULL, NULL, 0);
+	if (IS_ERR_OR_NULL(sgl_desc)) {
+		SDE_ERROR("hh_rm_mem_accept failed with error, rc=%ld\n",
+			   PTR_ERR(sgl_desc));
+		goto accept_fail;
+	}
+
+	rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
+	if (rc) {
+		SDE_ERROR("failed in sgl validation for label: %d, rc = %d\n",
+				payload->label, rc);
+		goto accept_fail;
+	}
+
+	sde_vm->base.io_mem_handle = payload->mem_handle;
+
+	SDE_INFO("mem accept succeeded for tag: %d label: %d\n", tag,
+				payload->label);
+
+accept_fail:
+	kfree(acl_desc);
+acl_fail:
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+}
+
+static int _sde_vm_release_irq(struct sde_vm *vm)
+{
+	struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
+	struct sde_vm_irq_desc *irq_desc = sde_vm->irq_desc;
+	int i, rc = 0;
+
+	for (i = 0; i < irq_desc->n_irq; i++) {
+		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
+
+		rc = hh_irq_release(entry->label);
+		if (rc) {
+			SDE_ERROR("failed to release IRQ label: %d rc = %d\n",
+				  entry->label, rc);
+			return rc;
+		}
+
+		atomic_dec(&sde_vm->base.n_irq_lent);
+	}
+
+	SDE_INFO("sde vm irq release succeeded, rc = %d\n", rc);
+
+	return rc;
+}
+
+static int _sde_vm_release(struct sde_kms *kms)
+{
+	struct sde_vm_trusted *sde_vm;
+	int rc = 0;
+
+	if (!kms->vm)
+		return 0;
+
+	sde_vm = to_vm_trusted(kms->vm);
+
+	mutex_lock(&sde_vm->base.vm_res_lock);
+
+	rc = hh_rm_mem_release(sde_vm->base.io_mem_handle, 0);
+	if (rc) {
+		SDE_ERROR("hh_rm_mem_release failed, rc=%d\n", rc);
+		goto end;
+	}
+
+	rc = hh_rm_mem_notify(sde_vm->base.io_mem_handle,
+			HH_RM_MEM_NOTIFY_OWNER,	HH_MEM_NOTIFIER_TAG_DISPLAY, 0);
+	if (rc) {
+		SDE_ERROR("hyp mem notify on release failed, rc = %d\n", rc);
+		goto end;
+	}
+
+	sde_vm->base.io_mem_handle = -1;
+
+	SDE_INFO("sde vm mem release succeeded, rc = %d\n", rc);
+
+	rc = _sde_vm_release_irq(kms->vm);
+	if (rc) {
+		SDE_ERROR("irq_release failed, rc = %d\n", rc);
+		goto end;
+	}
+
+end:
+	mutex_unlock(&sde_vm->base.vm_res_lock);
+
+	return rc;
+}
+
+int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
+{
+	struct msm_io_res io_res;
+	int rc = 0;
+
+	INIT_LIST_HEAD(&io_res.mem);
+	INIT_LIST_HEAD(&io_res.irq);
+
+	rc = sde_vm_get_resources(sde_kms, &io_res);
+	if (rc) {
+		SDE_ERROR("failed to get resources\n");
+		return rc;
+	}
+
+	vm->sgl_desc = sde_vm_populate_sgl(&io_res);
+	if (IS_ERR_OR_NULL(vm->sgl_desc)) {
+		SDE_ERROR("failed to parse sgl list\n");
+		return PTR_ERR(vm->sgl_desc);
+	}
+
+	vm->irq_desc = sde_vm_populate_irq(&io_res);
+	if (IS_ERR_OR_NULL(vm->irq_desc)) {
+		SDE_ERROR("failed to parse irq list\n");
+		return PTR_ERR(vm->irq_desc);
+	}
+
+	sort(vm->irq_desc->irq_entries, vm->irq_desc->n_irq,
+		sizeof(vm->irq_desc->irq_entries[0]), __irq_cmp, NULL);
+	sort(vm->sgl_desc->sgl_entries, vm->sgl_desc->n_sgl_entries,
+		sizeof(vm->sgl_desc->sgl_entries[0]), __sgl_cmp, NULL);
+
+	return rc;
+}
+
+static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
+{
+	struct sde_vm_trusted *sde_vm;
+	bool owns_irq, owns_mem_io;
+
+	sde_vm = to_vm_trusted(sde_kms->vm);
+
+	owns_irq = (sde_vm->irq_desc->n_irq ==
+			atomic_read(&sde_vm->base.n_irq_lent));
+	owns_mem_io = (sde_vm->base.io_mem_handle >= 0);
+
+	return (owns_irq && owns_mem_io);
+}
+
+static void _sde_vm_deinit(struct sde_kms *kms, struct sde_vm_ops *ops)
+{
+	struct sde_vm_trusted *sde_vm;
+
+	if (!kms->vm)
+		return;
+
+	sde_vm = to_vm_trusted(kms->vm);
+
+	memset(ops, 0, sizeof(*ops));
+
+	if (sde_vm->base.mem_notification_cookie)
+		hh_mem_notifier_unregister(
+				sde_vm->base.mem_notification_cookie);
+
+	kfree(sde_vm->sgl_desc);
+
+	if (sde_vm->irq_desc)
+		sde_vm_free_irq(sde_vm->irq_desc);
+
+	kfree(sde_vm);
+}
+
+static void _sde_vm_set_ops(struct sde_vm_ops *ops)
+{
+	memset(ops, 0, sizeof(*ops));
+
+	ops->vm_release = _sde_vm_release;
+	ops->vm_owns_hw = sde_vm_owns_hw;
+	ops->vm_deinit = _sde_vm_deinit;
+}
+
+int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops)
+{
+	struct sde_vm_trusted *sde_vm;
+	void *cookie;
+	int rc = 0;
+
+	sde_vm = kzalloc(sizeof(*sde_vm), GFP_KERNEL);
+	if (!sde_vm)
+		return -ENOMEM;
+
+	_sde_vm_set_ops(ops);
+
+	sde_vm->base.sde_kms = kms;
+
+	mutex_init(&sde_vm->base.vm_res_lock);
+
+	/**
+	 * Optimize resource population by reading the entire HW resource
+	 * space once during init. Once trusted vm starts supporting
+	 * per-display space assignment, this read has to be done on each event
+	 * notification.
+	 */
+	rc = _sde_vm_populate_res(kms, sde_vm);
+	if (rc) {
+		SDE_ERROR("failed to populate trusted vm res, rc= %d\n", rc);
+		goto init_fail;
+	}
+
+	cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_DISPLAY,
+			       sde_vm_mem_lend_notification_handler, sde_vm);
+	if (!cookie) {
+		SDE_ERROR("failed to register RM mem lend notifier\n");
+		rc = -EINVAL;
+		goto init_fail;
+	}
+	sde_vm->base.mem_notification_cookie = cookie;
+
+	rc = hh_irq_wait_for_lend(HH_IRQ_LABEL_SDE, HH_PRIMARY_VM,
+				  sde_vm_irq_lend_notification_handler,
+				  (void *)sde_vm);
+	if (rc) {
+		SDE_ERROR("wait for irq lend on label: %d failed, rc=%d\n",
+			   HH_IRQ_LABEL_SDE, rc);
+		goto init_fail;
+	}
+
+	kms->vm = &sde_vm->base;
+
+	atomic_set(&sde_vm->base.n_irq_lent, 0);
+
+	return 0;
+init_fail:
+	_sde_vm_deinit(kms, ops);
+
+	return rc;
+}
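
Taken together, the two backends implement the hand-off described in the commit message. A compressed view of the sequence, derived only from the functions added in this patch:

/*
 * Primary VM (lender)                          Trusted VM (borrower)
 * -------------------                          ---------------------
 * _sde_vm_release():
 *   hh_rm_mem_lend() + hh_rm_mem_notify()  --> sde_vm_mem_lend_notification_handler():
 *   hh_irq_lend() per IRQ label                  hh_rm_mem_accept(), sgl validated
 *                                            sde_vm_irq_lend_notification_handler():
 *                                                hh_irq_accept(), hwirq validated
 *                                            ... trusted VM drives the display HW ...
 * sde_vm_mem_release_notification_handler() <-- _sde_vm_release():
 *   hh_rm_mem_reclaim()                          hh_rm_mem_release() + hh_rm_mem_notify()
 * sde_vm_irq_release_notification_handler() <--  hh_irq_release() per IRQ label
 *   hh_irq_reclaim()
 */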