Przeglądaj źródła

disp: msm: sde: accept and reclaim resources on user prompt

Currently, trusted VM accepts and releases the resources
on RM notifications.

In cases where TUI fails to submit the first frame on the trusted
VM, the display cannot rely on the teardown commit IOCTL to release the
resources back to the primary VM.

To handle scenarios where RM notifications are dropped, the VMs should be
able to ACCEPT/RECLAIM resources without relying on the RM.

To address the above scenarios, this change moves the resource handling
calls from the notification handlers to the user prompt. With this change,
the trusted VM will ACCEPT the resources only on the first frame commit, and
the primary VM can RECLAIM the resources back if the TUI use case fails
or any of the RM notifications fail to deliver.

Change-Id: Iebb1724a7558e52567f8af1a49e38f8adbec88a0
Signed-off-by: Jeykumar Sankaran <[email protected]>
Jeykumar Sankaran 4 lat temu
rodzic
commit
9de6eee40e
5 zmienionych plików z 344 dodań i 231 usunięć
  1. 49 14
      msm/sde/sde_kms.c
  2. 13 0
      msm/sde/sde_vm.h
  3. 12 15
      msm/sde/sde_vm_common.c
  4. 86 68
      msm/sde/sde_vm_primary.c
  5. 184 134
      msm/sde/sde_vm_trusted.c

+ 49 - 14
msm/sde/sde_kms.c

@@ -1358,11 +1358,13 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 
 	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 0);
 
-	sde_kms_vm_trusted_resource_deinit(sde_kms);
+	sde_vm_lock(sde_kms);
 
 	if (vm_ops->vm_release)
 		rc = vm_ops->vm_release(sde_kms);
 
+	sde_vm_unlock(sde_kms);
+
 	return rc;
 }
 
@@ -1438,12 +1440,15 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 		}
 	}
 
+	sde_vm_lock(sde_kms);
 	/* release HW */
 	if (vm_ops->vm_release) {
 		rc = vm_ops->vm_release(sde_kms);
 		if (rc)
 			SDE_ERROR("sde vm assign failed, rc=%d\n", rc);
 	}
+	sde_vm_unlock(sde_kms);
+
 exit:
 	return rc;
 }
@@ -2675,6 +2680,38 @@ static int sde_kms_check_secure_transition(struct msm_kms *kms,
 	return 0;
 }
 
+static void sde_kms_vm_res_release(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct sde_vm_ops *vm_ops;
+	enum sde_crtc_vm_req vm_req;
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		struct sde_crtc_state *cstate;
+
+		cstate = to_sde_crtc_state(crtc_state); /* this CRTC, not crtcs[0] */
+
+		vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+		if (vm_req != VM_REQ_ACQUIRE)
+			return;
+	}
+
+	vm_ops = sde_vm_get_ops(sde_kms);
+	if (!vm_ops)
+		return;
+
+	sde_vm_lock(sde_kms);
+
+	if (vm_ops->vm_acquire_fail_handler)
+		vm_ops->vm_acquire_fail_handler(sde_kms);
+
+	sde_vm_unlock(sde_kms);
+}
+
 static int sde_kms_atomic_check(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
@@ -2695,9 +2732,15 @@ static int sde_kms_atomic_check(struct msm_kms *kms,
 		goto end;
 	}
 
+	ret = sde_kms_check_vm_request(kms, state);
+	if (ret) {
+		SDE_ERROR("vm switch request checks failed\n");
+		goto end;
+	}
+
 	ret = drm_atomic_helper_check(dev, state);
 	if (ret)
-		goto end;
+		goto vm_clean_up;
 	/*
 	 * Check if any secure transition(moving CRTC between secure and
 	 * non-secure state and vice-versa) is allowed or not. when moving
@@ -2707,12 +2750,12 @@ static int sde_kms_atomic_check(struct msm_kms *kms,
 	 */
 	ret = sde_kms_check_secure_transition(kms, state);
 	if (ret)
-		goto end;
+		goto vm_clean_up;
 
-	ret = sde_kms_check_vm_request(kms, state);
-	if (ret)
-		SDE_ERROR("vm switch request checks failed\n");
+	goto end;
 
+vm_clean_up:
+	sde_kms_vm_res_release(kms, state);
 end:
 	SDE_ATRACE_END("atomic_check");
 	return ret;
@@ -4454,7 +4497,6 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 	struct msm_drm_private *priv;
 	struct sde_splash_display *handoff_display;
 	struct dsi_display *display;
-	struct sde_vm_ops *vm_ops;
 	int ret, i;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
@@ -4462,13 +4504,6 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 		return -EINVAL;
 	}
 
-	vm_ops = sde_vm_get_ops(sde_kms);
-	if (vm_ops && !vm_ops->vm_owns_hw(sde_kms)) {
-		SDE_DEBUG(
-		   "skipping sde res init as device assign is not completed\n");
-		return 0;
-	}
-
 	if (sde_kms->dsi_display_count != 1) {
 		SDE_ERROR("no. of displays not supported:%d\n",
 				sde_kms->dsi_display_count);

+ 13 - 0
msm/sde/sde_vm.h

@@ -104,9 +104,22 @@ struct sde_vm_ops {
 	 */
 	int (*vm_client_post_acquire)(struct sde_kms *kms);
 
+	/**
+	 * vm_request_valid - hook to validate the RM_REQ state change
+	 * @sde_kms - handle to sde_kms
+	 * @old_state - current vm_req state
+	 * @new_state - new vm_req state
+	 */
 	int (*vm_request_valid)(struct sde_kms *sde_kms,
 			enum sde_crtc_vm_req old_state,
 			enum sde_crtc_vm_req new_state);
+
+	/**
+	 * vm_acquire_fail_handler - hook to the handler when resource
+	 *                           accept/reclaim fails.
+	 * @sde_kms - handle to sde_kms
+	 */
+	int (*vm_acquire_fail_handler)(struct sde_kms *sde_kms);
 };
 
 /**

+ 12 - 15
msm/sde/sde_vm_common.c

@@ -300,33 +300,30 @@ int sde_vm_request_valid(struct sde_kms *sde_kms,
 
 	switch (new_state) {
 	case VM_REQ_RELEASE:
-		if (old_state == VM_REQ_RELEASE)
-			rc = -EINVAL;
-		break;
 	case VM_REQ_NONE:
-		if (old_state == VM_REQ_RELEASE)
+		if ((old_state == VM_REQ_RELEASE) ||
+			!vm_ops->vm_owns_hw(sde_kms))
 			rc = -EINVAL;
 		break;
 	case VM_REQ_ACQUIRE:
-		/**
-		 * Only the display which requested for HW assignment
-		 * can reclaim it back
-		 */
-		if (old_state != VM_REQ_RELEASE)
+		if (old_state != VM_REQ_RELEASE) {
 			rc = -EINVAL;
+		} else if (!vm_ops->vm_owns_hw(sde_kms)) {
+			if (vm_ops->vm_acquire)
+				rc = vm_ops->vm_acquire(sde_kms);
+			else
+				rc = -EINVAL;
+		}
 		break;
 	default:
 		SDE_ERROR("invalid vm request\n");
 		rc = -EINVAL;
 	};
 
-	if (!rc && !vm_ops->vm_owns_hw(sde_kms))
-		rc = -EINVAL;
-
-	SDE_DEBUG("old req: %d new req: %d owns_hw: %d\n",
+	SDE_DEBUG("old req: %d new req: %d owns_hw: %d, rc: %d\n",
 			old_state, new_state,
-			vm_ops->vm_owns_hw(sde_kms));
-	SDE_EVT32(old_state, new_state, vm_ops->vm_owns_hw(sde_kms));
+			vm_ops->vm_owns_hw(sde_kms), rc);
+	SDE_EVT32(old_state, new_state, vm_ops->vm_owns_hw(sde_kms), rc);
 
 	return rc;
 }

+ 86 - 68
msm/sde/sde_vm_primary.c

@@ -12,7 +12,7 @@
 
 #define to_vm_primary(vm) ((struct sde_vm_primary *)vm)
 
-static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
+static bool _sde_vm_owns_hw(struct sde_kms *sde_kms)
 {
 	struct sde_vm_primary *sde_vm;
 	bool owns_irq, owns_mem_io;
@@ -28,76 +28,90 @@ static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
 void sde_vm_irq_release_notification_handler(void *req,
 		unsigned long notif_type, enum hh_irq_label label)
 {
-	struct sde_vm_primary *sde_vm;
-	int rc = 0;
+	SDE_INFO("irq release notification for label: %d\n", label);
+}
 
-	if (!req) {
-		SDE_ERROR("invalid data on release notificaiton\n");
-		return;
-	}
+static void sde_vm_mem_release_notification_handler(
+		enum hh_mem_notifier_tag tag, unsigned long notif_type,
+		void *entry_data, void *notif_msg)
+{
+	SDE_INFO("mem release notification for tag: %d\n", tag);
+}
 
-	sde_vm = to_vm_primary(req);
+int _sde_vm_reclaim_mem(struct sde_kms *sde_kms)
+{
+	struct sde_vm_primary *sde_vm = to_vm_primary(sde_kms->vm);
+	int rc = 0;
 
-	mutex_lock(&sde_vm->base.vm_res_lock);
+	if (sde_vm->base.io_mem_handle < 0)
+		return 0;
 
-	rc = hh_irq_reclaim(label);
+	rc = hh_rm_mem_reclaim(sde_vm->base.io_mem_handle, 0);
 	if (rc) {
-		SDE_ERROR("failed to reclaim irq label: %d\n", label);
-		goto notify_end;
+		SDE_ERROR("failed to reclaim IO memory, rc=%d\n", rc);
+		goto reclaim_fail;
 	}
 
-	/**
-	 * Skipping per IRQ label verification since IRQ's are MDSS centric.
-	 * Need to enable addition verifications when per-display IRQ's are
-	 *  supported.
-	 */
-	atomic_dec(&sde_vm->base.n_irq_lent);
-
-	SDE_INFO("irq reclaim succeeded for label: %d\n", label);
-notify_end:
-	mutex_unlock(&sde_vm->base.vm_res_lock);
+	SDE_INFO("mem reclaim succeeded\n");
+reclaim_fail:
+	sde_vm->base.io_mem_handle = -1;
 
+	return rc;
 }
 
-static void sde_vm_mem_release_notification_handler(
-		enum hh_mem_notifier_tag tag, unsigned long notif_type,
-		void *entry_data, void *notif_msg)
+int _sde_vm_reclaim_irq(struct sde_kms *sde_kms)
 {
-	struct hh_rm_notif_mem_released_payload *payload;
-	struct sde_vm_primary *sde_vm;
-	struct sde_kms *sde_kms;
-	int rc = 0;
+	struct sde_vm_primary *sde_vm = to_vm_primary(sde_kms->vm);
+	struct sde_vm_irq_desc *irq_desc;
+	int rc = 0, i;
 
-	if (notif_type != HH_RM_NOTIF_MEM_RELEASED ||
-			tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
-		return;
+	if (!sde_vm->irq_desc)
+		return 0;
 
-	if (!entry_data || !notif_msg)
-		return;
+	irq_desc = sde_vm->irq_desc;
+
+	for (i = atomic_read(&sde_vm->base.n_irq_lent) - 1; i >= 0; i--) {
+		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
+
+		rc = hh_irq_reclaim(entry->label);
+		if (rc) {
+			SDE_ERROR("failed to reclaim irq label: %d rc = %d\n",
+					entry->label, rc);
+			goto reclaim_fail;
+		}
+
+		atomic_dec(&sde_vm->base.n_irq_lent);
+
+		SDE_INFO("irq reclaim succeeded for label: %d\n", entry->label);
+	}
 
-	payload = (struct hh_rm_notif_mem_released_payload *)notif_msg;
-	sde_vm = (struct sde_vm_primary *)entry_data;
-	sde_kms = sde_vm->base.sde_kms;
+reclaim_fail:
+	sde_vm_free_irq(sde_vm->irq_desc);
+	sde_vm->irq_desc = NULL;
+	atomic_set(&sde_vm->base.n_irq_lent, 0);
 
-	mutex_lock(&sde_vm->base.vm_res_lock);
+	return rc;
+}
 
-	if (payload->mem_handle != sde_vm->base.io_mem_handle)
-		goto notify_end;
+static int _sde_vm_reclaim(struct sde_kms *sde_kms)
+{
+	int rc = 0;
 
-	rc = hh_rm_mem_reclaim(payload->mem_handle, 0);
+	rc = _sde_vm_reclaim_mem(sde_kms);
 	if (rc) {
-		SDE_ERROR("failed to reclaim IO memory, rc=%d\n", rc);
-		goto notify_end;
+		SDE_ERROR("vm reclaim mem failed, rc=%d\n", rc);
+		goto end;
 	}
 
-	sde_vm->base.io_mem_handle = -1;
+	rc = _sde_vm_reclaim_irq(sde_kms);
+	if (rc)
+		SDE_ERROR("vm reclaim irq failed, rc=%d\n", rc);
 
-	SDE_INFO("mem reclaim succeeded for tag: %d\n", tag);
-notify_end:
-	mutex_unlock(&sde_vm->base.vm_res_lock);
+end:
+	return rc;
 }
 
-static int _sde_vm_lend_notify_registers(struct sde_vm *vm,
+static int _sde_vm_lend_mem(struct sde_vm *vm,
 					 struct msm_io_res *io_res)
 {
 	struct sde_vm_primary *sde_vm;
@@ -114,13 +128,14 @@ static int _sde_vm_lend_notify_registers(struct sde_vm *vm,
 	if (IS_ERR(acl_desc)) {
 		SDE_ERROR("failed to populate acl descriptor, rc = %d\n",
 			   PTR_ERR(acl_desc));
-		return rc;
+		return -EINVAL;
 	}
 
 	sgl_desc = sde_vm_populate_sgl(io_res);
 	if (IS_ERR_OR_NULL(sgl_desc)) {
 		SDE_ERROR("failed to populate sgl descriptor, rc = %d\n",
 			   PTR_ERR(sgl_desc));
+		rc = -EINVAL;
 		goto sgl_fail;
 	}
 
@@ -131,6 +146,8 @@ static int _sde_vm_lend_notify_registers(struct sde_vm *vm,
 		goto fail;
 	}
 
+	sde_vm->base.io_mem_handle = mem_handle;
+
 	hh_rm_get_vmid(HH_TRUSTED_VM, &trusted_vmid);
 
 	vmid_desc = sde_vm_populate_vmid(trusted_vmid);
@@ -142,8 +159,6 @@ static int _sde_vm_lend_notify_registers(struct sde_vm *vm,
 		goto notify_fail;
 	}
 
-	sde_vm->base.io_mem_handle = mem_handle;
-
 	SDE_INFO("IO memory lend suceeded for tag: %d\n",
 			HH_MEM_NOTIFIER_TAG_DISPLAY);
 
@@ -167,6 +182,9 @@ static int _sde_vm_lend_irq(struct sde_vm *vm, struct msm_io_res *io_res)
 
 	irq_desc = sde_vm_populate_irq(io_res);
 
+	/* cache the irq list for validation during reclaim */
+	sde_vm->irq_desc = irq_desc;
+
 	for (i  = 0; i < irq_desc->n_irq; i++) {
 		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
 
@@ -176,25 +194,22 @@ static int _sde_vm_lend_irq(struct sde_vm *vm, struct msm_io_res *io_res)
 		if (rc) {
 			SDE_ERROR("irq lend failed for irq label: %d, rc=%d\n",
 				  entry->label, rc);
-			hh_irq_reclaim(entry->label);
-			return rc;
+			goto done;
 		}
 
+		atomic_inc(&sde_vm->base.n_irq_lent);
+
 		rc = hh_irq_lend_notify(entry->label);
 		if (rc) {
 			SDE_ERROR("irq lend notify failed, label: %d, rc=%d\n",
 				entry->label, rc);
-			hh_irq_reclaim(entry->label);
-			return rc;
+			goto done;
 		}
 
 		SDE_INFO("vm lend suceeded for IRQ label: %d\n", entry->label);
 	}
 
-	// cache the irq list for validation during release
-	sde_vm->irq_desc = irq_desc;
-	atomic_set(&sde_vm->base.n_irq_lent, sde_vm->irq_desc->n_irq);
-
+done:
 	return rc;
 }
 
@@ -215,25 +230,27 @@ static int _sde_vm_release(struct sde_kms *kms)
 	rc = sde_vm_get_resources(kms, &io_res);
 	if (rc) {
 		SDE_ERROR("fail to get resources\n");
-		goto assign_fail;
+		goto done;
 	}
 
-	mutex_lock(&sde_vm->base.vm_res_lock);
-
-	rc = _sde_vm_lend_notify_registers(kms->vm, &io_res);
+	rc = _sde_vm_lend_mem(kms->vm, &io_res);
 	if (rc) {
 		SDE_ERROR("fail to lend notify resources\n");
-		goto assign_fail;
+		goto res_lend_fail;
 	}
 
 	rc = _sde_vm_lend_irq(kms->vm, &io_res);
 	if (rc) {
 		SDE_ERROR("failed to lend irq's\n");
-		goto assign_fail;
+		goto res_lend_fail;
 	}
-assign_fail:
+
+	goto done;
+
+res_lend_fail:
+	_sde_vm_reclaim(kms);
+done:
 	sde_vm_free_resources(&io_res);
-	mutex_unlock(&sde_vm->base.vm_res_lock);
 
 	return rc;
 }
@@ -266,7 +283,8 @@ static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 	ops->vm_client_pre_release = sde_vm_pre_release;
 	ops->vm_client_post_acquire = sde_vm_post_acquire;
 	ops->vm_release = _sde_vm_release;
-	ops->vm_owns_hw = sde_vm_owns_hw;
+	ops->vm_acquire = _sde_vm_reclaim;
+	ops->vm_owns_hw = _sde_vm_owns_hw;
 	ops->vm_deinit = _sde_vm_deinit;
 	ops->vm_prepare_commit = sde_kms_vm_primary_prepare_commit;
 	ops->vm_post_commit = sde_kms_vm_primary_post_commit;

+ 184 - 134
msm/sde/sde_vm_trusted.c

@@ -61,86 +61,12 @@ static int __irq_cmp(const void *a, const void *b)
 	return  (l->label - r->label);
 }
 
-void sde_vm_irq_lend_notification_handler(void *req, unsigned long notif_type,
-		enum hh_irq_label label)
-{
-	struct sde_vm_trusted *sde_vm;
-	struct sde_kms *sde_kms;
-	struct sde_vm_irq_desc *irq_desc;
-	struct sde_vm_irq_entry irq_temp, *found = NULL;
-	struct irq_data *exp_irq_data, *acc_irq_data;
-	int accepted_irq, expected_irq;
-	int rc;
-
-	if (!req) {
-		SDE_ERROR("invalid data on lend notification\n");
-		return;
-	}
-
-	sde_vm = to_vm_trusted(req);
-	sde_kms = sde_vm->base.sde_kms;
-	irq_desc = sde_vm->irq_desc;
-
-	mutex_lock(&sde_vm->base.vm_res_lock);
-
-	memset(&irq_temp, 0, sizeof(irq_temp));
-
-	irq_temp.label = label;
-	found = bsearch((void *)&irq_temp, (void *)irq_desc->irq_entries,
-			irq_desc->n_irq, sizeof(struct sde_vm_irq_entry),
-			__irq_cmp);
-	if (!found) {
-		SDE_ERROR("irq mismatch for label: %d irq: %d\n",
-			   irq_temp.label, irq_temp.irq);
-		goto end;
-	}
-
-	expected_irq = found->irq;
-	accepted_irq = hh_irq_accept(label, -1, IRQ_TYPE_LEVEL_HIGH);
-	if (accepted_irq < 0) {
-		SDE_ERROR("failed to accept irq for label: %d\n");
-		goto end;
-	}
-
-	exp_irq_data = irq_get_irq_data(expected_irq);
-	if (!exp_irq_data) {
-		SDE_ERROR("failed to get irq data for irq: %d\n", exp_irq_data);
-		goto end;
-	}
-
-	acc_irq_data = irq_get_irq_data(accepted_irq);
-	if (!acc_irq_data) {
-		SDE_ERROR("failed to get irq data for irq: %d\n", accepted_irq);
-		goto end;
-	}
-
-	if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
-		SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n", label);
-		goto end;
-	}
-
-	SDE_INFO("IRQ accept succeeded for label %d irq: %d\n", label,
-			exp_irq_data->hwirq);
-
-	atomic_inc(&sde_vm->base.n_irq_lent);
-
-	rc = sde_kms_vm_trusted_resource_init(sde_kms);
-	if (rc)
-		SDE_ERROR("vm resource init failed\n");
-end:
-	mutex_unlock(&sde_vm->base.vm_res_lock);
-}
-
 static void sde_vm_mem_lend_notification_handler(enum hh_mem_notifier_tag tag,
 					       unsigned long notif_type,
 					void *entry_data, void *notif_msg)
 {
 	struct hh_rm_notif_mem_shared_payload *payload;
-	struct hh_sgl_desc *sgl_desc;
-	struct hh_acl_desc *acl_desc;
-	struct sde_kms *sde_kms;
 	struct sde_vm_trusted *sde_vm;
-	int rc = 0;
 
 	if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
 			tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
@@ -156,50 +82,21 @@ static void sde_vm_mem_lend_notification_handler(enum hh_mem_notifier_tag tag,
 		return;
 
 	sde_vm = (struct sde_vm_trusted *)entry_data;
-	sde_kms = sde_vm->base.sde_kms;
 
 	mutex_lock(&sde_vm->base.vm_res_lock);
 
-	acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
-	if (IS_ERR(acl_desc)) {
-		SDE_ERROR("failed to populate acl data, rc=%d\n",
-			   PTR_ERR(acl_desc));
-		goto acl_fail;
-	}
-
-	sgl_desc = hh_rm_mem_accept(payload->mem_handle, HH_RM_MEM_TYPE_IO,
-				    HH_RM_TRANS_TYPE_LEND,
-				    HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS|
-				    HH_RM_MEM_ACCEPT_VALIDATE_LABEL|
-				    HH_RM_MEM_ACCEPT_DONE,
-				    payload->label,
-				    acl_desc, NULL, NULL, 0);
-	if (IS_ERR_OR_NULL(sgl_desc)) {
-		SDE_ERROR("hh_rm_mem_accept failed with error, rc=%d\n",
-			   PTR_ERR(sgl_desc));
-		goto accept_fail;
-	}
-
-	rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
-	if (rc) {
-		SDE_ERROR("failed in sgl validation for label: %d, rc = %d\n",
-				payload->label, rc);
-		goto accept_fail;
-	}
-
 	sde_vm->base.io_mem_handle = payload->mem_handle;
 
-	SDE_INFO("mem accept succeeded for tag: %d label: %d\n", tag,
-				payload->label);
+	mutex_unlock(&sde_vm->base.vm_res_lock);
 
-	rc = sde_kms_vm_trusted_resource_init(sde_kms);
-	if (rc)
-		SDE_ERROR("vm resource init failed\n");
+	SDE_INFO("mem lend notification for tag: %d label: %d handle: %d\n",
+			tag, payload->label, payload->mem_handle);
+}
 
-accept_fail:
-	kfree(acl_desc);
-acl_fail:
-	mutex_unlock(&sde_vm->base.vm_res_lock);
+void sde_vm_irq_lend_notification_handler(void *req,
+		unsigned long notif_type, enum hh_irq_label label)
+{
+	SDE_INFO("IRQ LEND notification for label: %d\n", label);
 }
 
 static int _sde_vm_release_irq(struct sde_vm *vm)
@@ -208,48 +105,45 @@ static int _sde_vm_release_irq(struct sde_vm *vm)
 	struct sde_vm_irq_desc *irq_desc = sde_vm->irq_desc;
 	int i, rc = 0;
 
-	for (i = 0; i < irq_desc->n_irq; i++) {
+	for (i = atomic_read(&sde_vm->base.n_irq_lent) - 1; i >= 0; i--) {
 		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
 
 		rc = hh_irq_release(entry->label);
 		if (rc) {
 			SDE_ERROR("failed to release IRQ label: %d rc = %d\n",
 				  entry->label, rc);
-			return rc;
+			goto done;
 		}
 
+		atomic_dec(&sde_vm->base.n_irq_lent);
+
 		rc = hh_irq_release_notify(entry->label);
 		if (rc) {
 			SDE_ERROR(
 				 "irq release notify failed,label: %d rc: %d\n",
 				 entry->label, rc);
-			return rc;
+			goto done;
 		}
 
-		atomic_dec(&sde_vm->base.n_irq_lent);
+		SDE_INFO("sde vm irq release for label: %d succeeded\n",
+				entry->label);
 	}
-
-	SDE_INFO("sde vm irq release succeeded, rc = %d\n", rc);
-
+done:
 	return rc;
 }
 
-static int _sde_vm_release(struct sde_kms *kms)
+static int _sde_vm_release_mem(struct sde_vm *vm)
 {
-	struct sde_vm_trusted *sde_vm;
 	int rc = 0;
+	struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
 
-	if (!kms->vm)
+	if (sde_vm->base.io_mem_handle < 0)
 		return 0;
 
-	sde_vm = to_vm_trusted(kms->vm);
-
-	mutex_lock(&sde_vm->base.vm_res_lock);
-
 	rc = hh_rm_mem_release(sde_vm->base.io_mem_handle, 0);
 	if (rc) {
 		SDE_ERROR("hh_rm_mem_release failed, rc=%d\n", rc);
-		goto end;
+		goto done;
 	}
 
 	rc = hh_rm_mem_notify(sde_vm->base.io_mem_handle,
@@ -257,22 +151,40 @@ static int _sde_vm_release(struct sde_kms *kms)
 			HH_MEM_NOTIFIER_TAG_DISPLAY, 0);
 	if (rc) {
 		SDE_ERROR("hyp mem notify on release failed, rc = %d\n", rc);
-		goto end;
+		goto done;
 	}
 
 	sde_vm->base.io_mem_handle = -1;
 
-	SDE_INFO("sde vm mem release succeeded, rc = %d\n", rc);
+	SDE_INFO("sde vm mem release succeeded\n");
+done:
+	return rc;
 
-	rc = _sde_vm_release_irq(kms->vm);
+}
+
+static int _sde_vm_release(struct sde_kms *kms)
+{
+	struct sde_vm_trusted *sde_vm;
+	int rc = 0;
+
+	if (!kms->vm)
+		return 0;
+
+	sde_vm = to_vm_trusted(kms->vm);
+
+	sde_kms_vm_trusted_resource_deinit(kms);
+
+	rc = _sde_vm_release_mem(kms->vm);
 	if (rc) {
-		SDE_ERROR("irq_release failed, rc = %d\n", rc);
+		SDE_ERROR("mem_release failed, rc = %d\n", rc);
 		goto end;
 	}
 
-end:
-	mutex_unlock(&sde_vm->base.vm_res_lock);
+	rc = _sde_vm_release_irq(kms->vm);
+	if (rc)
+		SDE_ERROR("irq_release failed, rc = %d\n", rc);
 
+end:
 	return rc;
 }
 
@@ -310,7 +222,7 @@ int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
 	return rc;
 }
 
-static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
+static bool _sde_vm_owns_hw(struct sde_kms *sde_kms)
 {
 	struct sde_vm_trusted *sde_vm;
 	bool owns_irq, owns_mem_io;
@@ -347,6 +259,142 @@ static void  _sde_vm_deinit(struct sde_kms *kms, struct sde_vm_ops *ops)
 	kfree(sde_vm);
 }
 
+static int _sde_vm_accept_mem(struct sde_vm *vm)
+{
+	struct hh_sgl_desc *sgl_desc;
+	struct hh_acl_desc *acl_desc;
+	struct sde_vm_trusted *sde_vm;
+	int rc = 0;
+
+	sde_vm = to_vm_trusted(vm);
+
+	acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
+	if (IS_ERR(acl_desc)) {
+		SDE_ERROR("failed to populate acl data, rc=%d\n",
+			   PTR_ERR(acl_desc));
+		rc = PTR_ERR(acl_desc);
+		goto done;
+	}
+
+	sgl_desc = hh_rm_mem_accept(sde_vm->base.io_mem_handle,
+				    HH_RM_MEM_TYPE_IO,
+				    HH_RM_TRANS_TYPE_LEND,
+				    HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS|
+				    HH_RM_MEM_ACCEPT_VALIDATE_LABEL|
+				    HH_RM_MEM_ACCEPT_DONE,
+				    SDE_VM_MEM_LABEL,
+				    acl_desc, NULL, NULL, 0);
+	if (IS_ERR_OR_NULL(sgl_desc)) {
+		SDE_ERROR("hh_rm_mem_accept failed with error, rc=%d\n",
+			   PTR_ERR(sgl_desc));
+		rc = -EINVAL;
+
+		/* ACCEPT didn't go through. So no need to call the RELEASE */
+		sde_vm->base.io_mem_handle = -1;
+		goto accept_fail;
+	}
+
+	rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
+	if (rc) {
+		SDE_ERROR(
+			"failed in sgl validation for SDE_VM_MEM_LABEL label, rc = %d\n",
+			rc);
+		goto accept_fail;
+	}
+
+	SDE_INFO("mem accept succeeded for SDE_VM_MEM_LABEL label\n");
+
+	/* fall through: acl_desc must also be freed on success (was leaked) */
+
+accept_fail:
+	kfree(acl_desc);
+done:
+	return rc;
+}
+
+static int _sde_vm_accept_irq(struct sde_vm *vm)
+{
+	struct sde_vm_trusted *sde_vm;
+	struct sde_vm_irq_desc *irq_desc;
+	struct irq_data *exp_irq_data, *acc_irq_data;
+	int accepted_irq, expected_irq;
+	int rc = 0, i;
+
+	sde_vm = to_vm_trusted(vm);
+	irq_desc = sde_vm->irq_desc;
+
+	for (i = 0; i < irq_desc->n_irq; i++) {
+		struct sde_vm_irq_entry *irq_entry = &irq_desc->irq_entries[i];
+
+		expected_irq = irq_entry->irq;
+		accepted_irq = hh_irq_accept(irq_entry->label, -1,
+				IRQ_TYPE_LEVEL_HIGH);
+		if (accepted_irq < 0) {
+			SDE_ERROR("failed to accept irq for label: %d\n",
+					irq_entry->label);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		atomic_inc(&sde_vm->base.n_irq_lent);
+
+		exp_irq_data = irq_get_irq_data(expected_irq);
+		if (!exp_irq_data) {
+			SDE_ERROR("failed to get irq data for irq: %d\n",
+					expected_irq);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		acc_irq_data = irq_get_irq_data(accepted_irq);
+		if (!acc_irq_data) {
+			SDE_ERROR("failed to get irq data for irq: %d\n",
+					accepted_irq);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
+			SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n",
+					irq_entry->label);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		SDE_INFO("IRQ accept succeeded for label %d irq: %d\n",
+				irq_entry->label, exp_irq_data->hwirq);
+	}
+end:
+	return rc;
+}
+
+static int _sde_vm_accept(struct sde_kms *kms)
+{
+	int rc = 0;
+
+	rc = _sde_vm_accept_mem(kms->vm); /* accept lent IO memory first */
+	if (rc)
+		goto res_accept_fail;
+
+	rc = _sde_vm_accept_irq(kms->vm); /* then accept the lent IRQs */
+	if (rc)
+		goto res_accept_fail;
+
+	rc = sde_kms_vm_trusted_resource_init(kms);
+	if (rc) {
+		SDE_ERROR("vm resource init failed\n");
+		goto res_accept_fail;
+	}
+
+	goto end;
+
+res_accept_fail:
+	_sde_vm_release_irq(kms->vm); /* roll back any partial ACCEPT */
+	_sde_vm_release_mem(kms->vm);
+end:
+	return rc;
+}
+
 static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 {
 	memset(ops, 0, sizeof(*ops));
@@ -354,11 +402,13 @@ static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 	ops->vm_client_pre_release = sde_vm_pre_release;
 	ops->vm_client_post_acquire = sde_vm_post_acquire;
 	ops->vm_release = _sde_vm_release;
-	ops->vm_owns_hw = sde_vm_owns_hw;
+	ops->vm_acquire = _sde_vm_accept;
+	ops->vm_owns_hw = _sde_vm_owns_hw;
 	ops->vm_deinit = _sde_vm_deinit;
 	ops->vm_prepare_commit = sde_kms_vm_trusted_prepare_commit;
 	ops->vm_post_commit = sde_kms_vm_trusted_post_commit;
 	ops->vm_request_valid = sde_vm_request_valid;
+	ops->vm_acquire_fail_handler = _sde_vm_release;
 }
 
 int sde_vm_trusted_init(struct sde_kms *kms)