disp: msm: sde: add trusted UI support for SDE

Extends atomic check to validate TUI transition requests against
SDE and non-SDE client states. Triggers HW assignment after
making sure the current frame is completely flushed out of the HW
pipeline. Initiates the acquire at TUI end, after restoring the
software states.

Change-Id: I6ceccd44c18526ff818723b5631b66af3ff083c1
Signed-off-by: Jeykumar Sankaran <[email protected]>
commit ce532fb486
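
For orientation, here is a minimal standalone sketch of the HW-ownership
state machine this patch enforces. The enum values mirror the
CRTC_PROP_VM_REQ_STATE states validated in sde_vm_request_valid() in the
diff below; the transition table omits the vm_owns_hw() ownership check,
and the main() harness is illustrative only, not driver code.

#include <stdio.h>

/* Mirrors the VM request states carried on the CRTC property. */
enum vm_req { VM_REQ_NONE, VM_REQ_RELEASE, VM_REQ_ACQUIRE };

static int vm_transition_valid(enum vm_req old_state, enum vm_req new_state)
{
	switch (new_state) {
	case VM_REQ_RELEASE:	/* cannot release HW that is already released */
	case VM_REQ_NONE:	/* cannot resume normal commits while released */
		return old_state != VM_REQ_RELEASE;
	case VM_REQ_ACQUIRE:	/* only a released display may reclaim the HW */
		return old_state == VM_REQ_RELEASE;
	}
	return 0;
}

int main(void)
{
	printf("NONE    -> RELEASE: %s\n",
	       vm_transition_valid(VM_REQ_NONE, VM_REQ_RELEASE) ? "ok" : "rejected");
	printf("RELEASE -> ACQUIRE: %s\n",
	       vm_transition_valid(VM_REQ_RELEASE, VM_REQ_ACQUIRE) ? "ok" : "rejected");
	printf("RELEASE -> RELEASE: %s\n",
	       vm_transition_valid(VM_REQ_RELEASE, VM_REQ_RELEASE) ? "ok" : "rejected");
	return 0;
}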
7 changed files with 503 additions and 50 deletions
  1. msm/sde/sde_kms.c (+345, -1)
  2. msm/sde/sde_kms.h (+38, -2)
  3. msm/sde/sde_vm.h (+60, -41)
  4. msm/sde/sde_vm_common.c (+43, -0)
  5. msm/sde/sde_vm_common.h (+5, -0)
  6. msm/sde/sde_vm_primary.c (+6, -3)
  7. msm/sde/sde_vm_trusted.c (+6, -3)

+ 345 - 1
msm/sde/sde_kms.c

@@ -48,6 +48,7 @@
 #include "sde_crtc.h"
 #include "sde_reg_dma.h"
 #include "sde_connector.h"
+#include "sde_vm.h"
 
 #include <linux/qcom_scm.h>
 #include "soc/qcom/secure_buffer.h"
@@ -893,6 +894,85 @@ static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
 	return ret;
 }
 
+int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
+				      struct drm_atomic_state *state)
+{
+	struct drm_device *ddev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct sde_vm_ops *vm_ops;
+	struct sde_crtc_state *cstate;
+	enum sde_crtc_vm_req vm_req;
+	int rc = 0;
+
+	ddev = sde_kms->dev;
+
+	if (!sde_kms->vm)
+		return -EINVAL;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+
+	crtc = state->crtcs[0].ptr;
+
+	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+
+	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+	if (vm_req != VM_REQ_ACQUIRE)
+		return 0;
+
+	/* enable MDSS irq line */
+	sde_irq_update(&sde_kms->base, true);
+
+	/* clear the stale IRQ status bits */
+	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
+		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+
+	/* enable the display path IRQ's */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		sde_encoder_irq_control(encoder, true);
+
+	/* Schedule ESD work */
+	list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
+		if (drm_connector_mask(connector) & crtc->state->connector_mask)
+			sde_connector_schedule_status_work(connector, true);
+
+	/* handle non-SDE pre_acquire */
+	if (vm_ops->vm_client_post_acquire)
+		rc = vm_ops->vm_client_post_acquire(sde_kms);
+
+	return rc;
+}
+
+int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
+					   struct drm_atomic_state *state)
+{
+	struct drm_device *ddev;
+	struct drm_plane *plane;
+	struct sde_crtc_state *cstate;
+	enum sde_crtc_vm_req vm_req;
+
+	ddev = sde_kms->dev;
+
+	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+
+	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+	if (vm_req != VM_REQ_ACQUIRE)
+		return 0;
+
+	/* Clear the stale IRQ status bits */
+	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
+		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+
+	/* Program the SID's for the trusted VM */
+	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
+		sde_plane_set_sid(plane, 1);
+
+	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 1);
+
+	return 0;
+}
+
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 		struct drm_atomic_state *state)
 {
 {
@@ -902,6 +982,7 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
+	struct sde_vm_ops *vm_ops;
 	int i, rc;
 
 	if (!kms)
@@ -947,6 +1028,14 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
 	 * transitions prepare below if any transtions is required.
 	 */
 	sde_kms_prepare_secure_transition(kms, state);
+
+	if (!sde_kms->vm)
+		goto end;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+
+	if (vm_ops->vm_prepare_commit)
+		vm_ops->vm_prepare_commit(sde_kms, state);
 end:
 	SDE_ATRACE_END("prepare_commit");
 }
@@ -1039,6 +1128,129 @@ static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
 	}
 }
 
+int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state)
+{
+	struct sde_vm_ops *vm_ops;
+	struct drm_device *ddev;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_encoder *encoder;
+	struct sde_crtc_state *cstate;
+	struct drm_crtc_state *new_cstate;
+	enum sde_crtc_vm_req vm_req;
+	int rc = 0;
+
+	if (!sde_kms || !sde_kms->vm)
+		return -EINVAL;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+	ddev = sde_kms->dev;
+
+	crtc = state->crtcs[0].ptr;
+	new_cstate = state->crtcs[0].new_state;
+	cstate = to_sde_crtc_state(new_cstate);
+
+	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+	if (vm_req != VM_REQ_RELEASE)
+		return rc;
+
+	if (!new_cstate->active && !new_cstate->active_changed)
+		return rc;
+
+	/* if vm_req is enabled, the commit is guaranteed to have a single active CRTC */
+	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
+
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		sde_encoder_irq_control(encoder, false);
+
+	sde_irq_update(&sde_kms->base, false);
+
+	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
+		sde_plane_set_sid(plane, 0);
+
+	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 0);
+
+	if (vm_ops->vm_release)
+		rc = vm_ops->vm_release(sde_kms);
+
+	return rc;
+}
+
+int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state)
+{
+	struct drm_device *ddev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int rc = 0;
+
+	ddev = sde_kms->dev;
+
+	crtc = state->crtcs[0].ptr;
+
+	/* if vm_req is enabled, the commit is guaranteed to have a single active CRTC */
+	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
+
+	/* disable ESD work */
+	list_for_each_entry(connector,
+			&ddev->mode_config.connector_list, head) {
+		if (drm_connector_mask(connector) & crtc->state->connector_mask)
+			sde_connector_schedule_status_work(connector, false);
+	}
+
+	/* disable SDE irq's */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		sde_encoder_irq_control(encoder, false);
+
+	/* disable IRQ line */
+	sde_irq_update(&sde_kms->base, false);
+
+	return rc;
+}
+
+int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state)
+{
+	struct sde_vm_ops *vm_ops;
+	struct sde_crtc_state *cstate;
+	enum sde_crtc_vm_req vm_req;
+	int rc = 0;
+
+	if (!sde_kms || !sde_kms->vm)
+		return -EINVAL;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+
+	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+
+	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+	if (vm_req != VM_REQ_RELEASE)
+		goto exit;
+
+	/* handle SDE pre-release */
+	sde_kms_vm_pre_release(sde_kms, state);
+
+	/* handle non-SDE clients pre-release */
+	if (vm_ops->vm_client_pre_release) {
+		rc = vm_ops->vm_client_pre_release(sde_kms);
+		if (rc) {
+			SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	/* release HW */
+	if (vm_ops->vm_release) {
+		rc = vm_ops->vm_release(sde_kms);
+		if (rc)
+			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
+	}
+exit:
+	return rc;
+}
+
 static void sde_kms_complete_commit(struct msm_kms *kms,
 		struct drm_atomic_state *old_state)
 {
@@ -1049,6 +1261,7 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
 	struct drm_connector *connector;
 	struct drm_connector_state *old_conn_state;
 	struct msm_display_conn_params params;
+	struct sde_vm_ops *vm_ops;
 	int i, rc = 0;
 
 	if (!kms || !old_state)
@@ -1093,6 +1306,17 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
 		}
 	}
 
+	if (sde_kms->vm) {
+		vm_ops = &sde_kms->vm->vm_ops;
+
+		if (vm_ops->vm_post_commit) {
+			rc = vm_ops->vm_post_commit(sde_kms, old_state);
+			if (rc)
+				SDE_ERROR("vm post commit failed, rc = %d\n",
+					  rc);
+		}
+	}
+
 	pm_runtime_put_sync(sde_kms->dev->dev);
 
 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
@@ -1794,6 +2018,9 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
 		of_genpd_del_provider(pdev->dev.of_node);
 	}
 
+	if (sde_kms->vm && sde_kms->vm->vm_ops.vm_deinit)
+		sde_kms->vm->vm_ops.vm_deinit(sde_kms, &sde_kms->vm->vm_ops);
+
 	if (sde_kms->hw_intr)
 		sde_hw_intr_destroy(sde_kms->hw_intr);
 	sde_kms->hw_intr = NULL;
@@ -2061,6 +2288,108 @@ backoff:
 	goto retry;
 }
 
+static int sde_kms_check_vm_request(struct msm_kms *kms,
+				    struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate, *old_cstate;
+	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
+	struct drm_crtc *active_crtc = NULL, *global_active_crtc = NULL;
+	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
+	struct sde_vm_ops *vm_ops;
+	bool vm_req_active = false;
+	enum sde_crtc_idle_pc_state idle_pc_state;
+	int rc = 0;
+
+	if (!kms || !state)
+		return -EINVAL;
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+
+	if (!sde_kms->vm)
+		return 0;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
+		struct sde_crtc_state *old_state = NULL, *new_state = NULL;
+
+		new_state = to_sde_crtc_state(new_cstate);
+
+		if (!new_cstate->active && !new_cstate->active_changed)
+			continue;
+
+		new_vm_req = sde_crtc_get_property(new_state,
+				CRTC_PROP_VM_REQ_STATE);
+
+		commit_crtc_cnt++;
+
+		if (old_cstate) {
+			old_state = to_sde_crtc_state(old_cstate);
+			old_vm_req = sde_crtc_get_property(old_state,
+					CRTC_PROP_VM_REQ_STATE);
+		}
+
+		/**
+		 * No active request if the transition is from
+		 * VM_REQ_NONE to VM_REQ_NONE
+		 */
+		if (new_vm_req || (old_state && old_vm_req))
+			vm_req_active = true;
+
+		idle_pc_state = sde_crtc_get_property(new_state,
+						CRTC_PROP_IDLE_PC_STATE);
+
+		active_crtc = crtc;
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->state->active)
+			continue;
+
+		global_crtc_cnt++;
+		global_active_crtc = crtc;
+	}
+
+	/* Check for single crtc commits only on valid VM requests */
+	if (vm_req_active && active_crtc && global_active_crtc &&
+		(commit_crtc_cnt > sde_kms->catalog->max_trusted_vm_displays ||
+		 global_crtc_cnt > sde_kms->catalog->max_trusted_vm_displays ||
+		 active_crtc != global_active_crtc)) {
+		SDE_ERROR(
+			   "failed to switch VM due to CRTC concurrencies: MAX_CNT: %d active_cnt: %d global_cnt: %d active_crtc: %d global_crtc: %d\n",
+			   sde_kms->catalog->max_trusted_vm_displays,
+			   commit_crtc_cnt, global_crtc_cnt,
+			   DRMID(active_crtc), DRMID(global_active_crtc));
+		return -E2BIG;
+	}
+
+	if (!vm_req_active)
+		return 0;
+
+	/* disable idle-pc before releasing the HW */
+	if ((new_vm_req == VM_REQ_RELEASE) &&
+			(idle_pc_state == IDLE_PC_ENABLE)) {
+		SDE_ERROR("failed to switch VM since idle-pc is enabled\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&sde_kms->vm->vm_res_lock);
+	if (vm_ops->vm_request_valid)
+		rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
+	if (rc)
+		SDE_ERROR(
+		"failed to complete vm transition request. old_state = %d, new_state = %d, hw_ownership: %d\n",
+		old_vm_req, new_vm_req, vm_ops->vm_owns_hw(sde_kms));
+	mutex_unlock(&sde_kms->vm->vm_res_lock);
+
+	return rc;
+}
+
+
 static int sde_kms_check_secure_transition(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
@@ -2174,6 +2503,13 @@ static int sde_kms_atomic_check(struct msm_kms *kms,
 	 * Secure state
 	 */
 	ret = sde_kms_check_secure_transition(kms, state);
+	if (ret)
+		goto end;
+
+	ret = sde_kms_check_vm_request(kms, state);
+	if (ret)
+		SDE_ERROR("vm switch request checks failed\n");
+
 end:
 	SDE_ATRACE_END("atomic_check");
 	return ret;
@@ -3703,8 +4039,16 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
 	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);
 
-	return 0;
+	if (sde_in_trusted_vm(sde_kms))
+		rc = sde_vm_trusted_init(sde_kms);
+	else
+		rc = sde_vm_primary_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
+		goto error;
+	}
 
+	return 0;
 error:
 	_sde_kms_hw_destroy(sde_kms, platformdev);
 end:

+ 38 - 2
msm/sde/sde_kms.h

@@ -40,7 +40,6 @@
 #include "sde_power_handle.h"
 #include "sde_irq.h"
 #include "sde_core_perf.h"
-#include "sde_vm.h"
 
 #define DRMID(x) ((x) ? (x)->base.id : -1)
 
@@ -312,7 +311,6 @@ struct sde_kms {
 	struct dev_pm_qos_request pm_qos_irq_req[NR_CPUS];
 	struct irq_affinity_notify affinity_notify;
 
-	struct sde_vm_ops vm_ops;
 	struct sde_vm *vm;
 };
 
@@ -703,4 +701,42 @@ void sde_kms_irq_enable_notify(struct sde_kms *sde_kms, bool enable);
  */
 int sde_kms_get_io_resources(struct sde_kms *kms, struct msm_io_res *io_res);
 
+/**
+ * sde_kms_vm_trusted_post_commit - prepare the VM after the last commit,
+ *				    before releasing the HW resources
+ *				    from the trusted VM
+ * @sde_kms: pointer to sde_kms
+ * @state: current frame's atomic commit state
+ */
+int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state);
+/**
+ * sde_kms_vm_primary_post_commit - prepare the VM after the last commit,
+ *				    before assigning the HW resources
+ *				    from the primary VM
+ * @sde_kms: pointer to sde_kms
+ * @state: current frame's atomic commit state
+ */
+int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state);
+
+/**
+ * sde_kms_vm_trusted_prepare_commit - prepare the VM before the first
+ *				       commit after accepting the HW
+ *				       resources in the trusted VM
+ * @sde_kms: pointer to sde_kms
+ * @state: current frame's atomic commit state
+ */
+int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
+					   struct drm_atomic_state *state);
+/**
+ * sde_kms_vm_primary_prepare_commit - prepare the VM before the first
+ *				       commit after reclaiming the HW
+ *				       resources in the primary VM
+ * @sde_kms: pointer to sde_kms
+ * @state: current frame's atomic commit state
+ */
+int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
+					   struct drm_atomic_state *state);
+
 #endif /* __sde_kms_H__ */
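
The helpers documented above are not called directly by the commit path;
they are installed as hooks in struct sde_vm_ops (see sde_vm.h below) and
dispatched from sde_kms_prepare_commit()/sde_kms_complete_commit(). Here is
a standalone model of that dispatch, with stand-in types (struct kms and
its members are hypothetical stand-ins, not the driver's definitions):

#include <stdio.h>

struct kms;				/* stand-in for struct sde_kms */

struct vm_ops {				/* stand-in for struct sde_vm_ops */
	int (*vm_prepare_commit)(struct kms *k); /* first frame after ACQUIRE */
	int (*vm_post_commit)(struct kms *k);    /* last frame before RELEASE */
};

struct kms {
	struct vm_ops ops;
};

static int primary_prepare(struct kms *k)
{
	puts("primary: enable irqs, resume ESD work, notify non-SDE clients");
	return 0;
}

static int primary_post(struct kms *k)
{
	puts("primary: wait for frame flush, disable irqs, release the HW");
	return 0;
}

/* Dispatch points, mirroring the hunks added to sde_kms.c above. */
static void commit_frame(struct kms *k)
{
	if (k->ops.vm_prepare_commit)
		k->ops.vm_prepare_commit(k);
	puts("... frame flushed through the HW pipeline ...");
	if (k->ops.vm_post_commit)
		k->ops.vm_post_commit(k);
}

int main(void)
{
	struct kms k = { .ops = { primary_prepare, primary_post } };
	commit_frame(&k);
	return 0;
}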

+ 60 - 41
msm/sde/sde_vm.h

@@ -30,43 +30,7 @@ struct sde_vm_irq_desc {
 	struct sde_vm_irq_entry *irq_entries;
 };
 
-/**
- * sde_vm - VM layer descriptor. Abstract for all the VM's
- * @vm_res_lock - mutex to protect resource updates
- * @mem_notificaiton_cookie - Hyp RM notification identifier
- * @n_irq_lent - irq count
- * @io_mem_handle - RM identifier for the IO range
- * @sde_kms - handle to sde_kms
- */
-struct sde_vm {
-	struct mutex vm_res_lock;
-	void *mem_notification_cookie;
-	atomic_t n_irq_lent;
-	int io_mem_handle;
-	struct sde_kms *sde_kms;
-};
-
-/**
- * sde_vm_primary - VM layer descriptor for Primary VM
- * @base - parent struct object
- * @irq_desc - cache copy of irq list for validating reclaim
- */
-struct sde_vm_primary {
-	struct sde_vm base;
-	struct sde_vm_irq_desc *irq_desc;
-};
-
-/**
- * sde_vm_trusted - VM layer descriptor for Trusted VM
- * @base - parent struct object
- * @sgl_desc - hyp RM sgl list descriptor for IO ranges
- * @irq_desc - irq list
- */
-struct sde_vm_trusted {
-	struct sde_vm base;
-	struct sde_vm_irq_desc *irq_desc;
-	struct hh_sgl_desc *sgl_desc;
-};
+enum sde_crtc_vm_req;
 
 /**
  * sde_vm_ops - VM specific function hooks
@@ -139,15 +103,59 @@ struct sde_vm_ops {
 	 * @kms - handle to sde_kms
 	 */
 	int (*vm_client_post_acquire)(struct sde_kms *kms);
+
+	int (*vm_request_valid)(struct sde_kms *sde_kms,
+			enum sde_crtc_vm_req old_state,
+			enum sde_crtc_vm_req new_state);
+};
+
+/**
+ * sde_vm - VM layer descriptor. Abstract for all the VM's
+ * @vm_res_lock - mutex to protect resource updates
+ * @mem_notification_cookie - Hyp RM notification identifier
+ * @n_irq_lent - irq count
+ * @io_mem_handle - RM identifier for the IO range
+ * @sde_kms - handle to sde_kms
+ * @vm_ops - VM operation hooks for respective VM type
+ */
+struct sde_vm {
+	struct mutex vm_res_lock;
+	void *mem_notification_cookie;
+	atomic_t n_irq_lent;
+	int io_mem_handle;
+	struct sde_kms *sde_kms;
+	struct sde_vm_ops vm_ops;
 };
 
+/**
+ * sde_vm_primary - VM layer descriptor for Primary VM
+ * @base - parent struct object
+ * @irq_desc - cache copy of irq list for validating reclaim
+ */
+struct sde_vm_primary {
+	struct sde_vm base;
+	struct sde_vm_irq_desc *irq_desc;
+};
+
+/**
+ * sde_vm_trusted - VM layer descriptor for Trusted VM
+ * @base - parent struct object
+ * @sgl_desc - hyp RM sgl list descriptor for IO ranges
+ * @irq_desc - irq list
+ */
+struct sde_vm_trusted {
+	struct sde_vm base;
+	struct sde_vm_irq_desc *irq_desc;
+	struct hh_sgl_desc *sgl_desc;
+};
+
+#if IS_ENABLED(CONFIG_DRM_SDE_VM)
 /**
  * sde_vm_primary_init - Initialize primary VM layer
  * @kms - pointer to sde_kms
- * @ops - primary VM specific ops functions
  * @return - 0 on success
  */
-int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops);
+int sde_vm_primary_init(struct sde_kms *kms);
 
 /**
  * sde_vm_trusted_init - Initialize Trusted VM layer
@@ -155,6 +163,17 @@ int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops);
  * @ops - primary VM specific ops functions
  * @return - 0 on success
  */
-int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops);
-
+int sde_vm_trusted_init(struct sde_kms *kms);
+#else
+static inline int sde_vm_primary_init(struct sde_kms *kms)
+{
+	return 0;
+}
+
+static inline int sde_vm_trusted_init(struct sde_kms *kms)
+{
+	return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_SDE_VM) */
 #endif /* __SDE_VM_H__ */

+ 43 - 0
msm/sde/sde_vm_common.c

@@ -9,6 +9,7 @@
 #include "dp/dp_display.h"
 #include "sde_kms.h"
 #include "sde_vm_common.h"
+#include "sde_crtc.h"
 
 struct hh_notify_vmid_desc *sde_vm_populate_vmid(hh_vmid_t vmid)
 {
@@ -287,3 +288,45 @@ pre_release_rollback:
 
 	return rc;
 }
+
+int sde_vm_request_valid(struct sde_kms *sde_kms,
+			  enum sde_crtc_vm_req old_state,
+			  enum sde_crtc_vm_req new_state)
+{
+	struct sde_vm_ops *vm_ops;
+	int rc = 0;
+
+	vm_ops = &sde_kms->vm->vm_ops;
+
+	switch (new_state) {
+	case VM_REQ_RELEASE:
+		if (old_state == VM_REQ_RELEASE)
+			rc = -EINVAL;
+		break;
+	case VM_REQ_NONE:
+		if (old_state == VM_REQ_RELEASE)
+			rc = -EINVAL;
+		break;
+	case VM_REQ_ACQUIRE:
+		/**
+		 * Only the display that requested HW assignment
+		 * can reclaim it back
+		 */
+		if (old_state != VM_REQ_RELEASE)
+			rc = -EINVAL;
+		break;
+	default:
+		SDE_ERROR("invalid vm request\n");
+		rc = -EINVAL;
+	}
+
+	if (!rc && !vm_ops->vm_owns_hw(sde_kms))
+		rc = -EINVAL;
+
+	SDE_DEBUG("old req: %d new req: %d owns_hw: %d\n",
+			old_state, new_state,
+			vm_ops->vm_owns_hw(sde_kms));
+	SDE_EVT32(old_state, new_state, vm_ops->vm_owns_hw(sde_kms));
+
+	return rc;
+}

+ 5 - 0
msm/sde/sde_vm_common.h

@@ -75,4 +75,9 @@ int sde_vm_post_acquire(struct sde_kms *kms);
  */
 int sde_vm_pre_release(struct sde_kms *kms);
 
+
+int sde_vm_request_valid(struct sde_kms *sde_kms,
+			  enum sde_crtc_vm_req old_state,
+			  enum sde_crtc_vm_req new_state);
+
 #endif /* __SDE_VM_COMMON_H__ */

+ 6 - 3
msm/sde/sde_vm_primary.c

@@ -259,9 +259,12 @@ static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 	ops->vm_release = _sde_vm_release;
 	ops->vm_owns_hw = sde_vm_owns_hw;
 	ops->vm_deinit = _sde_vm_deinit;
+	ops->vm_prepare_commit = sde_kms_vm_primary_prepare_commit;
+	ops->vm_post_commit = sde_kms_vm_primary_post_commit;
+	ops->vm_request_valid = sde_vm_request_valid;
 }
 
-int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops)
+int sde_vm_primary_init(struct sde_kms *kms)
 {
 	struct sde_vm_primary *sde_vm;
 	void *cookie;
@@ -271,7 +274,7 @@ int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops)
 	if (!sde_vm)
 		return -ENOMEM;
 
-	_sde_vm_set_ops(ops);
+	_sde_vm_set_ops(&sde_vm->base.vm_ops);
 
 	cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_DISPLAY,
 			       sde_vm_mem_release_notification_handler, sde_vm);
@@ -290,7 +293,7 @@ int sde_vm_primary_init(struct sde_kms *kms, struct sde_vm_ops *ops)
 
 	return 0;
 init_fail:
-	_sde_vm_deinit(kms, ops);
+	_sde_vm_deinit(kms, &sde_vm->base.vm_ops);
 
 	return rc;
 }

+ 6 - 3
msm/sde/sde_vm_trusted.c

@@ -337,9 +337,12 @@ static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 	ops->vm_release = _sde_vm_release;
 	ops->vm_owns_hw = sde_vm_owns_hw;
 	ops->vm_deinit = _sde_vm_deinit;
+	ops->vm_prepare_commit = sde_kms_vm_trusted_prepare_commit;
+	ops->vm_post_commit = sde_kms_vm_trusted_post_commit;
+	ops->vm_request_valid = sde_vm_request_valid;
 }
 
-int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops)
+int sde_vm_trusted_init(struct sde_kms *kms)
 {
 	struct sde_vm_trusted *sde_vm;
 	void *cookie;
@@ -349,7 +352,7 @@ int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops)
 	if (!sde_vm)
 		return -ENOMEM;
 
-	_sde_vm_set_ops(ops);
+	_sde_vm_set_ops(&sde_vm->base.vm_ops);
 
 	sde_vm->base.sde_kms = kms;
 
@@ -390,7 +393,7 @@ int sde_vm_trusted_init(struct sde_kms *kms, struct sde_vm_ops *ops)
 
 	return 0;
 init_fail:
-	_sde_vm_deinit(kms, ops);
+	_sde_vm_deinit(kms, &sde_vm->base.vm_ops);
 
 	return rc;
 }