/* sde_vm_trusted.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/haven/hh_rm_drv.h>
  6. #include <linux/haven/hh_irq_lend.h>
  7. #include <linux/haven/hh_mem_notifier.h>
  8. #include <linux/sort.h>
  9. #include <linux/bsearch.h>
  10. #include <linux/irq.h>
  11. #include <linux/irqdomain.h>
  12. #include "sde_kms.h"
  13. #include "sde_vm_common.h"
  14. #include "sde_vm.h"
  15. #define to_vm_trusted(vm) ((struct sde_vm_trusted *)vm)
  16. static int __sgl_cmp(const void *a, const void *b)
  17. {
  18. struct hh_sgl_entry *l = (struct hh_sgl_entry *)a;
  19. struct hh_sgl_entry *r = (struct hh_sgl_entry *)b;
  20. return (l->ipa_base - r->ipa_base);
  21. }
  22. int _sde_vm_validate_sgl(struct hh_sgl_desc *expected,
  23. struct hh_sgl_desc *assigned)
  24. {
  25. u32 idx;
  26. /*
  27. * fragmented address spaces are not supported.
  28. * So the number of sgl entries is expected to be the same.
  29. */
  30. if (expected->n_sgl_entries != assigned->n_sgl_entries)
  31. return -E2BIG;
  32. sort(assigned->sgl_entries, assigned->n_sgl_entries,
  33. sizeof(assigned->sgl_entries[0]), __sgl_cmp, NULL);
  34. for (idx = 0; idx < expected->n_sgl_entries; idx++) {
  35. struct hh_sgl_entry *e = &expected->sgl_entries[idx];
  36. struct hh_sgl_entry *a = &assigned->sgl_entries[idx];
  37. if ((e->ipa_base != a->ipa_base) || (e->size != a->size)) {
  38. SDE_DEBUG("sgl mismatch: (%ld - %d) vs (%ld - %d)\n",
  39. e->ipa_base, e->size, a->ipa_base, a->size);
  40. return -EINVAL;
  41. }
  42. }
  43. return 0;
  44. }
  45. static int __irq_cmp(const void *a, const void *b)
  46. {
  47. struct sde_vm_irq_entry *l = (struct sde_vm_irq_entry *)a;
  48. struct sde_vm_irq_entry *r = (struct sde_vm_irq_entry *)b;
  49. return (l->label - r->label);
  50. }
  51. void sde_vm_irq_lend_notification_handler(void *req, enum hh_irq_label label)
  52. {
  53. struct sde_vm_trusted *sde_vm;
  54. struct sde_kms *sde_kms;
  55. struct sde_vm_irq_desc *irq_desc;
  56. struct sde_vm_irq_entry irq_temp, *found = NULL;
  57. struct irq_data *exp_irq_data, *acc_irq_data;
  58. int accepted_irq, expected_irq;
  59. int rc;
  60. if (!req) {
  61. SDE_ERROR("invalid data on lend notification\n");
  62. return;
  63. }
  64. sde_vm = to_vm_trusted(req);
  65. sde_kms = sde_vm->base.sde_kms;
  66. irq_desc = sde_vm->irq_desc;
  67. mutex_lock(&sde_vm->base.vm_res_lock);
  68. memset(&irq_temp, 0, sizeof(irq_temp));
  69. irq_temp.label = label;
  70. found = bsearch((void *)&irq_temp, (void *)irq_desc->irq_entries,
  71. irq_desc->n_irq, sizeof(struct sde_vm_irq_entry),
  72. __irq_cmp);
  73. if (!found) {
  74. SDE_ERROR("irq mismatch for label: %d irq: %d\n",
  75. irq_temp.label, irq_temp.irq);
  76. goto end;
  77. }
  78. expected_irq = found->irq;
  79. accepted_irq = hh_irq_accept(label, -1, IRQ_TYPE_LEVEL_HIGH);
  80. if (accepted_irq < 0) {
  81. SDE_ERROR("failed to accept irq for label: %d\n");
  82. goto end;
  83. }
  84. exp_irq_data = irq_get_irq_data(expected_irq);
  85. if (!exp_irq_data) {
  86. SDE_ERROR("failed to get irq data for irq: %d\n", exp_irq_data);
  87. goto end;
  88. }
  89. acc_irq_data = irq_get_irq_data(accepted_irq);
  90. if (!acc_irq_data) {
  91. SDE_ERROR("failed to get irq data for irq: %d\n", accepted_irq);
  92. goto end;
  93. }
  94. if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
  95. SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n", label);
  96. goto end;
  97. }
  98. SDE_INFO("IRQ accept succeeded for label %d irq: %d\n", label,
  99. exp_irq_data->hwirq);
  100. atomic_inc(&sde_vm->base.n_irq_lent);
  101. rc = sde_kms_vm_trusted_resource_init(sde_kms);
  102. if (rc)
  103. SDE_ERROR("vm resource init failed\n");
  104. end:
  105. mutex_unlock(&sde_vm->base.vm_res_lock);
  106. }
  107. static void sde_vm_mem_lend_notification_handler(enum hh_mem_notifier_tag tag,
  108. unsigned long notif_type,
  109. void *entry_data, void *notif_msg)
  110. {
  111. struct hh_rm_notif_mem_shared_payload *payload;
  112. struct hh_sgl_desc *sgl_desc;
  113. struct hh_acl_desc *acl_desc;
  114. struct sde_kms *sde_kms;
  115. struct sde_vm_trusted *sde_vm;
  116. int rc = 0;
  117. if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
  118. tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
  119. return;
  120. if (!entry_data || !notif_msg)
  121. return;
  122. payload = (struct hh_rm_notif_mem_shared_payload *)notif_msg;
  123. if (payload->trans_type != HH_RM_TRANS_TYPE_LEND ||
  124. payload->label != SDE_VM_MEM_LABEL)
  125. return;
  126. sde_vm = (struct sde_vm_trusted *)entry_data;
  127. sde_kms = sde_vm->base.sde_kms;
  128. mutex_lock(&sde_vm->base.vm_res_lock);
  129. acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
  130. if (IS_ERR(acl_desc)) {
  131. SDE_ERROR("failed to populate acl data, rc=%d\n",
  132. PTR_ERR(acl_desc));
  133. goto acl_fail;
  134. }
  135. sgl_desc = hh_rm_mem_accept(payload->mem_handle, HH_RM_MEM_TYPE_IO,
  136. HH_RM_TRANS_TYPE_LEND,
  137. HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS|
  138. HH_RM_MEM_ACCEPT_VALIDATE_LABEL|
  139. HH_RM_MEM_ACCEPT_DONE,
  140. payload->label,
  141. acl_desc, NULL, NULL, 0);
  142. if (IS_ERR_OR_NULL(sgl_desc)) {
  143. SDE_ERROR("hh_rm_mem_accept failed with error, rc=%d\n",
  144. PTR_ERR(sgl_desc));
  145. goto accept_fail;
  146. }
  147. rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
  148. if (rc) {
  149. SDE_ERROR("failed in sgl validation for label: %d, rc = %d\n",
  150. payload->label, rc);
  151. goto accept_fail;
  152. }
  153. sde_vm->base.io_mem_handle = payload->mem_handle;
  154. SDE_INFO("mem accept succeeded for tag: %d label: %d\n", tag,
  155. payload->label);
  156. rc = sde_kms_vm_trusted_resource_init(sde_kms);
  157. if (rc)
  158. SDE_ERROR("vm resource init failed\n");
  159. accept_fail:
  160. kfree(acl_desc);
  161. acl_fail:
  162. mutex_unlock(&sde_vm->base.vm_res_lock);
  163. }
  164. static int _sde_vm_release_irq(struct sde_vm *vm)
  165. {
  166. struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
  167. struct sde_vm_irq_desc *irq_desc = sde_vm->irq_desc;
  168. int i, rc = 0;
  169. for (i = 0; i < irq_desc->n_irq; i++) {
  170. struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];
  171. rc = hh_irq_release(entry->label);
  172. if (rc) {
  173. SDE_ERROR("failed to release IRQ label: %d rc = %d\n",
  174. entry->label, rc);
  175. return rc;
  176. }
  177. atomic_dec(&sde_vm->base.n_irq_lent);
  178. }
  179. SDE_INFO("sde vm irq release succeeded, rc = %d\n", rc);
  180. return rc;
  181. }
  182. static int _sde_vm_release(struct sde_kms *kms)
  183. {
  184. struct sde_vm_trusted *sde_vm;
  185. int rc = 0;
  186. if (!kms->vm)
  187. return 0;
  188. sde_vm = to_vm_trusted(kms->vm);
  189. mutex_lock(&sde_vm->base.vm_res_lock);
  190. rc = hh_rm_mem_release(sde_vm->base.io_mem_handle, 0);
  191. if (rc) {
  192. SDE_ERROR("hh_rm_mem_release failed, rc=%d\n", rc);
  193. goto end;
  194. }
  195. rc = hh_rm_mem_notify(sde_vm->base.io_mem_handle,
  196. HH_RM_MEM_NOTIFY_OWNER_RELEASED,
  197. HH_MEM_NOTIFIER_TAG_DISPLAY, 0);
  198. if (rc) {
  199. SDE_ERROR("hyp mem notify on release failed, rc = %d\n", rc);
  200. goto end;
  201. }
  202. sde_vm->base.io_mem_handle = -1;
  203. SDE_INFO("sde vm mem release succeeded, rc = %d\n", rc);
  204. rc = _sde_vm_release_irq(kms->vm);
  205. if (rc) {
  206. SDE_ERROR("irq_release failed, rc = %d\n", rc);
  207. goto end;
  208. }
  209. end:
  210. mutex_unlock(&sde_vm->base.vm_res_lock);
  211. return rc;
  212. }
  213. int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
  214. {
  215. struct msm_io_res io_res;
  216. int rc = 0;
  217. INIT_LIST_HEAD(&io_res.mem);
  218. INIT_LIST_HEAD(&io_res.irq);
  219. rc = sde_vm_get_resources(sde_kms, &io_res);
  220. if (rc) {
  221. SDE_ERROR("fail to get resources\n");
  222. return rc;
  223. }
  224. vm->sgl_desc = sde_vm_populate_sgl(&io_res);
  225. if (IS_ERR_OR_NULL(vm->sgl_desc)) {
  226. SDE_ERROR("failed to parse sgl list\n");
  227. return PTR_ERR(vm->sgl_desc);
  228. }
  229. vm->irq_desc = sde_vm_populate_irq(&io_res);
  230. if (IS_ERR_OR_NULL(vm->irq_desc)) {
  231. SDE_ERROR("failed to parse irq list\n");
  232. return PTR_ERR(vm->irq_desc);
  233. }
  234. sort(vm->irq_desc->irq_entries, vm->irq_desc->n_irq,
  235. sizeof(vm->irq_desc->irq_entries[0]), __irq_cmp, NULL);
  236. sort(vm->sgl_desc->sgl_entries, vm->sgl_desc->n_sgl_entries,
  237. sizeof(vm->sgl_desc->sgl_entries[0]), __sgl_cmp, NULL);
  238. return rc;
  239. }
  240. static bool sde_vm_owns_hw(struct sde_kms *sde_kms)
  241. {
  242. struct sde_vm_trusted *sde_vm;
  243. bool owns_irq, owns_mem_io;
  244. sde_vm = to_vm_trusted(sde_kms->vm);
  245. owns_irq = (sde_vm->irq_desc->n_irq ==
  246. atomic_read(&sde_vm->base.n_irq_lent));
  247. owns_mem_io = (sde_vm->base.io_mem_handle >= 0);
  248. return (owns_irq && owns_mem_io);
  249. }
  250. static void _sde_vm_deinit(struct sde_kms *kms, struct sde_vm_ops *ops)
  251. {
  252. struct sde_vm_trusted *sde_vm;
  253. if (!kms->vm)
  254. return;
  255. sde_vm = to_vm_trusted(kms->vm);
  256. memset(ops, 0, sizeof(*ops));
  257. if (sde_vm->base.mem_notification_cookie)
  258. hh_mem_notifier_unregister(
  259. sde_vm->base.mem_notification_cookie);
  260. kfree(sde_vm->sgl_desc);
  261. if (sde_vm->irq_desc)
  262. sde_vm_free_irq(sde_vm->irq_desc);
  263. kfree(sde_vm);
  264. }
  265. static void _sde_vm_set_ops(struct sde_vm_ops *ops)
  266. {
  267. memset(ops, 0, sizeof(*ops));
  268. ops->vm_client_pre_release = sde_vm_pre_release;
  269. ops->vm_client_post_acquire = sde_vm_post_acquire;
  270. ops->vm_release = _sde_vm_release;
  271. ops->vm_owns_hw = sde_vm_owns_hw;
  272. ops->vm_deinit = _sde_vm_deinit;
  273. ops->vm_prepare_commit = sde_kms_vm_trusted_prepare_commit;
  274. ops->vm_post_commit = sde_kms_vm_trusted_post_commit;
  275. ops->vm_request_valid = sde_vm_request_valid;
  276. }
  277. int sde_vm_trusted_init(struct sde_kms *kms)
  278. {
  279. struct sde_vm_trusted *sde_vm;
  280. void *cookie;
  281. int rc = 0;
  282. sde_vm = kzalloc(sizeof(*sde_vm), GFP_KERNEL);
  283. if (!sde_vm)
  284. return -ENOMEM;
  285. _sde_vm_set_ops(&sde_vm->base.vm_ops);
  286. sde_vm->base.sde_kms = kms;
  287. mutex_init(&sde_vm->base.vm_res_lock);
  288. /**
  289. * Optimize resource population by reading the entire HW resource
  290. * space once during init. Once trusted vm starts supporting
  291. * per-display space assignment, this read has to be done on each event
  292. * notification.
  293. */
  294. rc = _sde_vm_populate_res(kms, sde_vm);
  295. if (rc) {
  296. SDE_ERROR("failed to populate trusted vm res, rc= %d\n", rc);
  297. goto init_fail;
  298. }
  299. cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_DISPLAY,
  300. sde_vm_mem_lend_notification_handler, sde_vm);
  301. if (!cookie) {
  302. SDE_ERROR("fails to register RM mem lend notifier\n");
  303. goto init_fail;
  304. }
  305. sde_vm->base.mem_notification_cookie = cookie;
  306. rc = hh_irq_wait_for_lend(HH_IRQ_LABEL_SDE, HH_PRIMARY_VM,
  307. sde_vm_irq_lend_notification_handler,
  308. (void *)sde_vm);
  309. if (rc) {
  310. SDE_ERROR("wait for irq lend on label: %d failed, rc=%d\n",
  311. HH_IRQ_LABEL_SDE, rc);
  312. goto init_fail;
  313. }
  314. kms->vm = &sde_vm->base;
  315. atomic_set(&sde_vm->base.n_irq_lent, 0);
  316. return 0;
  317. init_fail:
  318. _sde_vm_deinit(kms, &sde_vm->base.vm_ops);
  319. return rc;
  320. }