sde_vm_trusted.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_irq_lend.h>
#include <linux/gunyah/gh_mem_notifier.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#include "sde_kms.h"
#include "sde_vm_common.h"
#include "sde_vm.h"
#include "sde_vm_msgq.h"

#define to_vm_trusted(vm) ((struct sde_vm_trusted *)vm)

static int __sgl_cmp(const void *a, const void *b)
{
	const struct gh_sgl_entry *l = a;
	const struct gh_sgl_entry *r = b;

	/*
	 * ipa_base is 64-bit; returning the raw difference truncated to int
	 * can report the wrong ordering for bases more than INT_MAX apart,
	 * so compare explicitly instead of subtracting.
	 */
	if (l->ipa_base < r->ipa_base)
		return -1;
	if (l->ipa_base > r->ipa_base)
		return 1;
	return 0;
}

int _sde_vm_validate_sgl(struct gh_sgl_desc *expected,
		struct gh_sgl_desc *assigned)
{
	u32 idx;

	sort(assigned->sgl_entries, assigned->n_sgl_entries,
			sizeof(assigned->sgl_entries[0]), __sgl_cmp, NULL);

	/*
	 * Fragmented address spaces are not supported, so the number of
	 * sgl entries is expected to match.
	 */
	if (expected->n_sgl_entries != assigned->n_sgl_entries) {
		SDE_ERROR("expected sgl entries = %d, assigned sgl entries = %d\n",
				expected->n_sgl_entries, assigned->n_sgl_entries);

		for (idx = 0; idx < expected->n_sgl_entries; idx++) {
			struct gh_sgl_entry *e = &expected->sgl_entries[idx];

			SDE_ERROR("expected sgl entry: (0x%llx - 0x%llx)\n",
					e->ipa_base, e->size);
		}

		for (idx = 0; idx < assigned->n_sgl_entries; idx++) {
			struct gh_sgl_entry *a = &assigned->sgl_entries[idx];

			SDE_ERROR("assigned sgl entry: (0x%llx - 0x%llx)\n",
					a->ipa_base, a->size);
		}

		return -E2BIG;
	}

	for (idx = 0; idx < expected->n_sgl_entries; idx++) {
		struct gh_sgl_entry *e = &expected->sgl_entries[idx];
		struct gh_sgl_entry *a = &assigned->sgl_entries[idx];

		if ((e->ipa_base != a->ipa_base) || (e->size != a->size)) {
			SDE_ERROR("sgl mismatch: (0x%llx - 0x%llx) vs (0x%llx - 0x%llx)\n",
					e->ipa_base, e->size,
					a->ipa_base, a->size);
			return -EINVAL;
		}
	}

	return 0;
}

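/*
 * Validation sketch (hypothetical values, for illustration only): with both
 * descriptors sorted by __sgl_cmp(), the check reduces to an element-wise
 * compare of (ipa_base, size) pairs, e.g.
 *
 *	expected: (0x0ae00000 - 0x200000), (0x0aeb0000 - 0x10000)
 *	assigned: (0x0ae00000 - 0x200000), (0x0aeb0000 - 0x10000) -> 0
 *	assigned: (0x0ae00000 - 0x200000)                         -> -E2BIG
 *	assigned: (0x0ae00000 - 0x100000), (0x0aeb0000 - 0x10000) -> -EINVAL
 */
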
static int __irq_cmp(const void *a, const void *b)
{
	struct sde_vm_irq_entry *l = (struct sde_vm_irq_entry *)a;
	struct sde_vm_irq_entry *r = (struct sde_vm_irq_entry *)b;

	return (l->label - r->label);
}

static void sde_vm_mem_lend_notification_handler(enum gh_mem_notifier_tag tag,
		unsigned long notif_type, void *entry_data, void *notif_msg)
{
	struct gh_rm_notif_mem_shared_payload *payload;
	struct sde_vm_trusted *sde_vm;

	if (notif_type != GH_RM_NOTIF_MEM_SHARED ||
			tag != GH_MEM_NOTIFIER_TAG_DISPLAY)
		return;

	if (!entry_data || !notif_msg)
		return;

	payload = (struct gh_rm_notif_mem_shared_payload *)notif_msg;
	if (payload->trans_type != GH_RM_TRANS_TYPE_LEND ||
			payload->label != SDE_VM_MEM_LABEL)
		return;

	sde_vm = (struct sde_vm_trusted *)entry_data;

	mutex_lock(&sde_vm->base.vm_res_lock);
	sde_vm->base.io_mem_handle = payload->mem_handle;
	mutex_unlock(&sde_vm->base.vm_res_lock);

	SDE_INFO("mem lend notification for tag: %d label: %d handle: %d\n",
			tag, payload->label, payload->mem_handle);
}

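/*
 * Flow note (as understood from this file, not a spec): the primary VM lends
 * the display I/O memory and the resource manager raises a
 * GH_RM_NOTIF_MEM_SHARED notification; the handler above only caches the
 * mem_handle under vm_res_lock. The actual gh_rm_mem_accept() happens later,
 * in _sde_vm_accept_mem(), on the acquire path.
 */
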
void sde_vm_irq_lend_notification_handler(void *req,
		unsigned long notif_type, enum gh_irq_label label)
{
	SDE_INFO("IRQ LEND notification for label: %d\n", label);
}

static int _sde_vm_release_irq(struct sde_vm *vm)
{
	struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
	struct sde_vm_irq_desc *irq_desc = sde_vm->irq_desc;
	int i, rc = 0;

	for (i = atomic_read(&sde_vm->base.n_irq_lent) - 1; i >= 0; i--) {
		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];

		rc = gh_irq_release(entry->label);
		if (rc) {
			SDE_ERROR("failed to release IRQ label: %d rc = %d\n",
					entry->label, rc);
			goto done;
		}
		atomic_dec(&sde_vm->base.n_irq_lent);

		rc = gh_irq_release_notify(entry->label);
		if (rc) {
			SDE_ERROR("irq release notify failed, label: %d rc: %d\n",
					entry->label, rc);
			goto done;
		}

		SDE_INFO("sde vm irq release for label: %d succeeded\n",
				entry->label);
	}
done:
	return rc;
}

static int _sde_vm_release_mem(struct sde_vm *vm)
{
	int rc = 0;
	struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;

	if (sde_vm->base.io_mem_handle < 0)
		return 0;

	rc = gh_rm_mem_release(sde_vm->base.io_mem_handle, 0);
	if (rc) {
		SDE_ERROR("gh_rm_mem_release failed, rc = %d\n", rc);
		goto done;
	}

	rc = gh_rm_mem_notify(sde_vm->base.io_mem_handle,
			GH_RM_MEM_NOTIFY_OWNER_RELEASED,
			GH_MEM_NOTIFIER_TAG_DISPLAY, 0);
	if (rc) {
		SDE_ERROR("hyp mem notify on release failed, rc = %d\n", rc);
		goto done;
	}

	sde_vm->base.io_mem_handle = -1;

	SDE_INFO("sde vm mem release succeeded\n");
done:
	return rc;
}

static int _sde_vm_release(struct sde_kms *kms)
{
	struct sde_vm_trusted *sde_vm;
	int rc = 0;

	if (!kms->vm)
		return 0;

	sde_vm = to_vm_trusted(kms->vm);

	sde_kms_vm_trusted_resource_deinit(kms);

	rc = _sde_vm_release_irq(kms->vm);
	if (rc)
		SDE_ERROR("irq_release failed, rc = %d\n", rc);

	rc = _sde_vm_release_mem(kms->vm);
	if (rc)
		SDE_ERROR("mem_release failed, rc = %d\n", rc);

	return rc;
}

int _sde_vm_resource_init(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	int rc = 0;

	rc = sde_kms_vm_trusted_resource_init(sde_kms, state);
	if (rc)
		SDE_ERROR("vm resource init failed\n");

	return rc;
}

int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
{
	struct msm_io_res io_res;
	int rc = 0;

	INIT_LIST_HEAD(&io_res.mem);
	INIT_LIST_HEAD(&io_res.irq);

	rc = sde_vm_get_resources(sde_kms, &io_res);
	if (rc) {
		SDE_ERROR("failed to get resources\n");
		return rc;
	}

	vm->sgl_desc = sde_vm_populate_sgl(&io_res);
	if (IS_ERR_OR_NULL(vm->sgl_desc)) {
		SDE_ERROR("failed to parse sgl list\n");
		/*
		 * PTR_ERR(NULL) is 0, which would be treated as success;
		 * map NULL to a real error and clear the field so cleanup
		 * never kfree()s an ERR_PTR value.
		 */
		rc = vm->sgl_desc ? PTR_ERR(vm->sgl_desc) : -ENOMEM;
		vm->sgl_desc = NULL;
		return rc;
	}

	vm->irq_desc = sde_vm_populate_irq(&io_res);
	if (IS_ERR_OR_NULL(vm->irq_desc)) {
		SDE_ERROR("failed to parse irq list\n");
		rc = vm->irq_desc ? PTR_ERR(vm->irq_desc) : -ENOMEM;
		vm->irq_desc = NULL;
		return rc;
	}

	sort(vm->irq_desc->irq_entries, vm->irq_desc->n_irq,
			sizeof(vm->irq_desc->irq_entries[0]), __irq_cmp, NULL);
	sort(vm->sgl_desc->sgl_entries, vm->sgl_desc->n_sgl_entries,
			sizeof(vm->sgl_desc->sgl_entries[0]), __sgl_cmp, NULL);

	return rc;
}

static bool _sde_vm_owns_hw(struct sde_kms *sde_kms)
{
	struct sde_vm_trusted *sde_vm;
	bool owns_irq, owns_mem_io;

	sde_vm = to_vm_trusted(sde_kms->vm);

	/*
	 * HW is owned only when every expected IRQ has been accepted and a
	 * valid I/O memory handle is currently held.
	 */
	owns_irq = (sde_vm->irq_desc->n_irq ==
			atomic_read(&sde_vm->base.n_irq_lent));
	owns_mem_io = (sde_vm->base.io_mem_handle >= 0);

	return (owns_irq && owns_mem_io);
}

static void _sde_vm_deinit(struct sde_kms *kms, struct sde_vm_ops *ops)
{
	struct sde_vm_trusted *sde_vm;

	if (!kms->vm)
		return;

	sde_vm = to_vm_trusted(kms->vm);

	memset(ops, 0, sizeof(*ops));

	sde_vm_msgq_deinit(kms->vm);

	if (sde_vm->base.mem_notification_cookie)
		gh_mem_notifier_unregister(sde_vm->base.mem_notification_cookie);

	kfree(sde_vm->sgl_desc);

	if (sde_vm->irq_desc)
		sde_vm_free_irq(sde_vm->irq_desc);

	kfree(sde_vm);
	/* clear the published handle so it cannot dangle after the free */
	kms->vm = NULL;
}

static int _sde_vm_accept_mem(struct sde_vm *vm)
{
	struct gh_sgl_desc *sgl_desc;
	struct gh_acl_desc *acl_desc;
	struct sde_vm_trusted *sde_vm;
	int rc = 0;

	sde_vm = to_vm_trusted(vm);

	acl_desc = sde_vm_populate_acl(GH_TRUSTED_VM);
	if (IS_ERR(acl_desc)) {
		SDE_ERROR("failed to populate acl data, rc = %ld\n",
				PTR_ERR(acl_desc));
		return PTR_ERR(acl_desc);
	}

	sgl_desc = gh_rm_mem_accept(sde_vm->base.io_mem_handle,
			GH_RM_MEM_TYPE_IO,
			GH_RM_TRANS_TYPE_LEND,
			GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
			GH_RM_MEM_ACCEPT_VALIDATE_LABEL |
			GH_RM_MEM_ACCEPT_DONE,
			SDE_VM_MEM_LABEL,
			acl_desc, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(sgl_desc)) {
		SDE_ERROR("gh_rm_mem_accept failed, rc = %ld\n",
				PTR_ERR(sgl_desc));
		rc = -EINVAL;

		/* ACCEPT did not go through, so there is no need to RELEASE */
		sde_vm->base.io_mem_handle = -1;

		goto acl_done;
	}

	rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
	if (rc) {
		SDE_ERROR("sgl validation failed for SDE_VM_MEM_LABEL, rc = %d\n",
				rc);
		goto sgl_done;
	}

	SDE_INFO("mem accept succeeded for SDE_VM_MEM_LABEL\n");

sgl_done:
	kvfree(sgl_desc);
acl_done:
	kfree(acl_desc);

	return rc;
}

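/*
 * Accept-side sequence sketch (an illustration built on the helpers above,
 * not an additional code path): a hypothetical acquire flow would be
 *
 *	sde_vm->base.io_mem_handle = payload->mem_handle;  // from notifier
 *	rc = _sde_vm_accept_mem(vm);      // ACCEPT + sgl validation
 *	if (rc)
 *		_sde_vm_release_mem(vm);  // undo a validated-but-bad ACCEPT
 *
 * On ACCEPT failure io_mem_handle is reset to -1 above, which makes any
 * subsequent _sde_vm_release_mem() a no-op by design.
 */
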
static int _sde_vm_accept_irq(struct sde_vm *vm)
{
	struct sde_vm_trusted *sde_vm;
	struct sde_vm_irq_desc *irq_desc;
	struct irq_data *exp_irq_data, *acc_irq_data;
	int accepted_irq, expected_irq;
	int rc = 0, i;

	sde_vm = to_vm_trusted(vm);
	irq_desc = sde_vm->irq_desc;

	for (i = 0; i < irq_desc->n_irq; i++) {
		struct sde_vm_irq_entry *irq_entry = &irq_desc->irq_entries[i];

		expected_irq = irq_entry->irq;
		accepted_irq = gh_irq_accept(irq_entry->label, -1,
				IRQ_TYPE_LEVEL_HIGH);
		if (accepted_irq < 0) {
			SDE_ERROR("failed to accept irq for label: %d\n",
					irq_entry->label);
			rc = -EINVAL;
			goto end;
		}

		atomic_inc(&sde_vm->base.n_irq_lent);

		exp_irq_data = irq_get_irq_data(expected_irq);
		if (!exp_irq_data) {
			SDE_ERROR("failed to get irq data for irq: %d\n",
					expected_irq);
			rc = -EINVAL;
			goto end;
		}

		acc_irq_data = irq_get_irq_data(accepted_irq);
		if (!acc_irq_data) {
			SDE_ERROR("failed to get irq data for irq: %d\n",
					accepted_irq);
			rc = -EINVAL;
			goto end;
		}

		if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
			SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n",
					irq_entry->label);
			rc = -EINVAL;
			goto end;
		}

		SDE_INFO("IRQ accept succeeded for label %d irq: %lu\n",
				irq_entry->label, exp_irq_data->hwirq);
	}
end:
	return rc;
}

static int _sde_vm_accept(struct sde_kms *kms)
{
	int rc = 0;

	rc = _sde_vm_accept_mem(kms->vm);
	if (rc)
		goto res_accept_fail;

	rc = _sde_vm_accept_irq(kms->vm);
	if (rc)
		goto res_accept_fail;

	return 0;

res_accept_fail:
	_sde_vm_release_irq(kms->vm);
	_sde_vm_release_mem(kms->vm);

	return rc;
}

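/*
 * Design note: _sde_vm_accept() rolls back both resource classes on any
 * failure. The release helpers tolerate partial state: _sde_vm_release_irq()
 * walks only the n_irq_lent entries actually accepted so far, and
 * _sde_vm_release_mem() is a no-op while io_mem_handle is -1, so calling
 * both after a partial accept is safe.
 */
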
static void _sde_vm_set_ops(struct sde_vm_ops *ops)
{
	memset(ops, 0, sizeof(*ops));

	ops->vm_client_pre_release = sde_vm_pre_release;
	ops->vm_client_post_acquire = sde_vm_post_acquire;
	ops->vm_release = _sde_vm_release;
	ops->vm_acquire = _sde_vm_accept;
	ops->vm_owns_hw = _sde_vm_owns_hw;
	ops->vm_deinit = _sde_vm_deinit;
	ops->vm_prepare_commit = sde_kms_vm_trusted_prepare_commit;
	ops->vm_post_commit = sde_kms_vm_trusted_post_commit;
	ops->vm_request_valid = sde_vm_request_valid;
	ops->vm_acquire_fail_handler = _sde_vm_release;
	ops->vm_msg_send = sde_vm_msg_send;
	ops->vm_resource_init = _sde_vm_resource_init;
}

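/*
 * Caller-side sketch (minimal, assuming kms->vm was set up by
 * sde_vm_trusted_init()): the KMS core reaches this backend only through
 * the ops table, e.g.
 *
 *	struct sde_vm_ops *ops = &kms->vm->vm_ops;
 *
 *	if (ops->vm_owns_hw(kms))
 *		rc = ops->vm_release(kms);
 */
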
int sde_vm_trusted_init(struct sde_kms *kms)
{
	struct sde_vm_trusted *sde_vm;
	void *cookie;
	int rc = 0;

	sde_vm = kzalloc(sizeof(*sde_vm), GFP_KERNEL);
	if (!sde_vm)
		return -ENOMEM;

	_sde_vm_set_ops(&sde_vm->base.vm_ops);

	sde_vm->base.sde_kms = kms;

	mutex_init(&sde_vm->base.vm_res_lock);

	/*
	 * Publish the handle before the first failure path so that
	 * _sde_vm_deinit(), which bails out when kms->vm is NULL, can
	 * unwind a partial init instead of leaking sde_vm.
	 */
	kms->vm = &sde_vm->base;
	atomic_set(&sde_vm->base.n_irq_lent, 0);

	/*
	 * Optimize resource population by reading the entire HW resource
	 * space once during init. Once the trusted VM starts supporting
	 * per-display space assignment, this read has to be done on each
	 * event notification.
	 */
	rc = _sde_vm_populate_res(kms, sde_vm);
	if (rc) {
		SDE_ERROR("failed to populate trusted vm res, rc = %d\n", rc);
		goto init_fail;
	}

	cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_DISPLAY,
			sde_vm_mem_lend_notification_handler, sde_vm);
	if (!cookie) {
		SDE_ERROR("failed to register RM mem lend notifier\n");
		/* failure is reported via a NULL cookie, not via rc */
		rc = -EINVAL;
		goto init_fail;
	}
	sde_vm->base.mem_notification_cookie = cookie;

	rc = gh_irq_wait_for_lend_v2(GH_IRQ_LABEL_SDE, GH_PRIMARY_VM,
			sde_vm_irq_lend_notification_handler, (void *)sde_vm);
	if (rc) {
		SDE_ERROR("wait for irq lend on label: %d failed, rc = %d\n",
				GH_IRQ_LABEL_SDE, rc);
		goto init_fail;
	}

	rc = sde_vm_msgq_init(kms->vm);
	if (rc) {
		SDE_ERROR("failed to initialize the msgq, rc = %d\n", rc);
		goto init_fail;
	}

	return 0;

init_fail:
	_sde_vm_deinit(kms, &sde_vm->base.vm_ops);

	return rc;
}

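/*
 * Init-time sketch (hypothetical caller; the predicate and the primary-VM
 * counterpart are assumptions, not defined in this file): a probe path
 * would pick the backend matching the VM it runs in, e.g.
 *
 *	rc = in_trusted_vm ? sde_vm_trusted_init(kms)
 *			   : sde_vm_primary_init(kms);
 */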