  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/haven/hh_rm_drv.h>
  6. #include <linux/haven/hh_irq_lend.h>
  7. #include <linux/haven/hh_mem_notifier.h>
  8. #include <linux/sort.h>
  9. #include <linux/bsearch.h>
  10. #include <linux/irq.h>
  11. #include <linux/irqdomain.h>
  12. #include "sde_kms.h"
  13. #include "sde_vm_common.h"
  14. #include "sde_vm.h"
  15. #include "sde_vm_msgq.h"
  16. #define to_vm_trusted(vm) ((struct sde_vm_trusted *)vm)
  17. static int __sgl_cmp(const void *a, const void *b)
  18. {
  19. struct hh_sgl_entry *l = (struct hh_sgl_entry *)a;
  20. struct hh_sgl_entry *r = (struct hh_sgl_entry *)b;
  21. return (l->ipa_base - r->ipa_base);
  22. }
  23. int _sde_vm_validate_sgl(struct hh_sgl_desc *expected,
  24. struct hh_sgl_desc *assigned)
  25. {
  26. u32 idx;
  27. /*
  28. * fragmented address spaces are not supported.
  29. * So the number of sgl entries is expected to be the same.
  30. */
  31. if (expected->n_sgl_entries != assigned->n_sgl_entries)
  32. return -E2BIG;
  33. sort(assigned->sgl_entries, assigned->n_sgl_entries,
  34. sizeof(assigned->sgl_entries[0]), __sgl_cmp, NULL);
  35. for (idx = 0; idx < expected->n_sgl_entries; idx++) {
  36. struct hh_sgl_entry *e = &expected->sgl_entries[idx];
  37. struct hh_sgl_entry *a = &assigned->sgl_entries[idx];
  38. if ((e->ipa_base != a->ipa_base) || (e->size != a->size)) {
  39. SDE_DEBUG("sgl mismatch: (%llu - %llu) vs (%llu - %llu)\n",
  40. e->ipa_base, e->size, a->ipa_base, a->size);
  41. return -EINVAL;
  42. }
  43. }
  44. return 0;
  45. }
  46. static int __irq_cmp(const void *a, const void *b)
  47. {
  48. struct sde_vm_irq_entry *l = (struct sde_vm_irq_entry *)a;
  49. struct sde_vm_irq_entry *r = (struct sde_vm_irq_entry *)b;
  50. return (l->label - r->label);
  51. }
  52. static void sde_vm_mem_lend_notification_handler(enum hh_mem_notifier_tag tag,
  53. unsigned long notif_type,
  54. void *entry_data, void *notif_msg)
  55. {
  56. struct hh_rm_notif_mem_shared_payload *payload;
  57. struct sde_vm_trusted *sde_vm;
  58. if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
  59. tag != HH_MEM_NOTIFIER_TAG_DISPLAY)
  60. return;
  61. if (!entry_data || !notif_msg)
  62. return;
  63. payload = (struct hh_rm_notif_mem_shared_payload *)notif_msg;
  64. if (payload->trans_type != HH_RM_TRANS_TYPE_LEND ||
  65. payload->label != SDE_VM_MEM_LABEL)
  66. return;
  67. sde_vm = (struct sde_vm_trusted *)entry_data;
  68. mutex_lock(&sde_vm->base.vm_res_lock);
  69. sde_vm->base.io_mem_handle = payload->mem_handle;
  70. mutex_unlock(&sde_vm->base.vm_res_lock);
  71. SDE_INFO("mem lend notification for tag: %d label: %d handle: %d\n",
  72. tag, payload->label, payload->mem_handle);
  73. }
/*
 * sde_vm_irq_lend_notification_handler - callback registered through
 * hh_irq_wait_for_lend_v2(); fires when the primary VM lends the SDE IRQ.
 *
 * Only logs the event: the actual hh_irq_accept() happens later in
 * _sde_vm_accept_irq() as part of the acquire sequence.
 */
void sde_vm_irq_lend_notification_handler(void *req,
		unsigned long notif_type, enum hh_irq_label label)
{
	SDE_INFO("IRQ LEND notification for label: %d\n", label);
}
/*
 * _sde_vm_release_irq - hand every accepted IRQ back to the primary VM.
 * @vm: base VM context (embedded in struct sde_vm_trusted)
 *
 * Walks the accepted IRQs from the highest lent index downwards so that
 * n_irq_lent always reflects the number of IRQs still held; on any
 * failure the loop stops and the remaining entries stay accounted for.
 *
 * Return: 0 on success, or the first hh_irq_release()/
 * hh_irq_release_notify() error code.
 */
static int _sde_vm_release_irq(struct sde_vm *vm)
{
	struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
	struct sde_vm_irq_desc *irq_desc = sde_vm->irq_desc;
	int i, rc = 0;

	for (i = atomic_read(&sde_vm->base.n_irq_lent) - 1; i >= 0; i--) {
		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];

		rc = hh_irq_release(entry->label);
		if (rc) {
			SDE_ERROR("failed to release IRQ label: %d rc = %d\n",
					entry->label, rc);
			goto done;
		}
		/* Release succeeded: this entry is no longer held by us. */
		atomic_dec(&sde_vm->base.n_irq_lent);

		/* Tell the lender the IRQ has been returned. */
		rc = hh_irq_release_notify(entry->label);
		if (rc) {
			SDE_ERROR(
				"irq release notify failed,label: %d rc: %d\n",
				entry->label, rc);
			goto done;
		}

		SDE_INFO("sde vm irq release for label: %d succeeded\n",
				entry->label);
	}
done:
	return rc;
}
  106. static int _sde_vm_release_mem(struct sde_vm *vm)
  107. {
  108. int rc = 0;
  109. struct sde_vm_trusted *sde_vm = (struct sde_vm_trusted *)vm;
  110. if (sde_vm->base.io_mem_handle < 0)
  111. return 0;
  112. rc = hh_rm_mem_release(sde_vm->base.io_mem_handle, 0);
  113. if (rc) {
  114. SDE_ERROR("hh_rm_mem_release failed, rc=%d\n", rc);
  115. goto done;
  116. }
  117. rc = hh_rm_mem_notify(sde_vm->base.io_mem_handle,
  118. HH_RM_MEM_NOTIFY_OWNER_RELEASED,
  119. HH_MEM_NOTIFIER_TAG_DISPLAY, 0);
  120. if (rc) {
  121. SDE_ERROR("hyp mem notify on release failed, rc = %d\n", rc);
  122. goto done;
  123. }
  124. sde_vm->base.io_mem_handle = -1;
  125. SDE_INFO("sde vm mem release succeeded\n");
  126. done:
  127. return rc;
  128. }
  129. static int _sde_vm_release(struct sde_kms *kms)
  130. {
  131. struct sde_vm_trusted *sde_vm;
  132. int rc = 0;
  133. if (!kms->vm)
  134. return 0;
  135. sde_vm = to_vm_trusted(kms->vm);
  136. sde_kms_vm_trusted_resource_deinit(kms);
  137. rc = _sde_vm_release_mem(kms->vm);
  138. if (rc) {
  139. SDE_ERROR("mem_release failed, rc = %d\n", rc);
  140. goto end;
  141. }
  142. rc = _sde_vm_release_irq(kms->vm);
  143. if (rc)
  144. SDE_ERROR("irq_release failed, rc = %d\n", rc);
  145. end:
  146. return rc;
  147. }
  148. int _sde_vm_resource_init(struct sde_kms *sde_kms,
  149. struct drm_atomic_state *state)
  150. {
  151. int rc = 0;
  152. rc = sde_kms_vm_trusted_resource_init(sde_kms, state);
  153. if (rc)
  154. SDE_ERROR("vm resource init failed\n");
  155. return rc;
  156. }
  157. int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
  158. {
  159. struct msm_io_res io_res;
  160. int rc = 0;
  161. INIT_LIST_HEAD(&io_res.mem);
  162. INIT_LIST_HEAD(&io_res.irq);
  163. rc = sde_vm_get_resources(sde_kms, &io_res);
  164. if (rc) {
  165. SDE_ERROR("fail to get resources\n");
  166. return rc;
  167. }
  168. vm->sgl_desc = sde_vm_populate_sgl(&io_res);
  169. if (IS_ERR_OR_NULL(vm->sgl_desc)) {
  170. SDE_ERROR("failed to parse sgl list\n");
  171. return PTR_ERR(vm->sgl_desc);
  172. }
  173. vm->irq_desc = sde_vm_populate_irq(&io_res);
  174. if (IS_ERR_OR_NULL(vm->irq_desc)) {
  175. SDE_ERROR("failed to parse irq list\n");
  176. return PTR_ERR(vm->irq_desc);
  177. }
  178. sort(vm->irq_desc->irq_entries, vm->irq_desc->n_irq,
  179. sizeof(vm->irq_desc->irq_entries[0]), __irq_cmp, NULL);
  180. sort(vm->sgl_desc->sgl_entries, vm->sgl_desc->n_sgl_entries,
  181. sizeof(vm->sgl_desc->sgl_entries[0]), __sgl_cmp, NULL);
  182. return rc;
  183. }
  184. static bool _sde_vm_owns_hw(struct sde_kms *sde_kms)
  185. {
  186. struct sde_vm_trusted *sde_vm;
  187. bool owns_irq, owns_mem_io;
  188. sde_vm = to_vm_trusted(sde_kms->vm);
  189. owns_irq = (sde_vm->irq_desc->n_irq ==
  190. atomic_read(&sde_vm->base.n_irq_lent));
  191. owns_mem_io = (sde_vm->base.io_mem_handle >= 0);
  192. return (owns_irq && owns_mem_io);
  193. }
  194. static void _sde_vm_deinit(struct sde_kms *kms, struct sde_vm_ops *ops)
  195. {
  196. struct sde_vm_trusted *sde_vm;
  197. if (!kms->vm)
  198. return;
  199. sde_vm = to_vm_trusted(kms->vm);
  200. memset(ops, 0, sizeof(*ops));
  201. sde_vm_msgq_deinit(kms->vm);
  202. if (sde_vm->base.mem_notification_cookie)
  203. hh_mem_notifier_unregister(
  204. sde_vm->base.mem_notification_cookie);
  205. kfree(sde_vm->sgl_desc);
  206. if (sde_vm->irq_desc)
  207. sde_vm_free_irq(sde_vm->irq_desc);
  208. kfree(sde_vm);
  209. }
  210. static int _sde_vm_accept_mem(struct sde_vm *vm)
  211. {
  212. struct hh_sgl_desc *sgl_desc;
  213. struct hh_acl_desc *acl_desc;
  214. struct sde_vm_trusted *sde_vm;
  215. int rc = 0;
  216. sde_vm = to_vm_trusted(vm);
  217. acl_desc = sde_vm_populate_acl(HH_TRUSTED_VM);
  218. if (IS_ERR(acl_desc)) {
  219. SDE_ERROR("failed to populate acl data, rc=%ld\n",
  220. PTR_ERR(acl_desc));
  221. rc = PTR_ERR(acl_desc);
  222. goto done;
  223. }
  224. sgl_desc = hh_rm_mem_accept(sde_vm->base.io_mem_handle,
  225. HH_RM_MEM_TYPE_IO,
  226. HH_RM_TRANS_TYPE_LEND,
  227. HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS|
  228. HH_RM_MEM_ACCEPT_VALIDATE_LABEL|
  229. HH_RM_MEM_ACCEPT_DONE,
  230. SDE_VM_MEM_LABEL,
  231. acl_desc, NULL, NULL, 0);
  232. if (IS_ERR_OR_NULL(sgl_desc)) {
  233. SDE_ERROR("hh_rm_mem_accept failed with error, rc=%ld\n",
  234. PTR_ERR(sgl_desc));
  235. rc = -EINVAL;
  236. /* ACCEPT didn't go through. So no need to call the RELEASE */
  237. sde_vm->base.io_mem_handle = -1;
  238. goto accept_fail;
  239. }
  240. rc = _sde_vm_validate_sgl(sde_vm->sgl_desc, sgl_desc);
  241. if (rc) {
  242. SDE_ERROR(
  243. "failed in sgl validation for SDE_VM_MEM_LABEL label, rc = %d\n",
  244. rc);
  245. goto accept_fail;
  246. }
  247. SDE_INFO("mem accept succeeded for SDE_VM_MEM_LABEL label\n");
  248. return 0;
  249. accept_fail:
  250. kfree(acl_desc);
  251. done:
  252. return rc;
  253. }
/*
 * _sde_vm_accept_irq - accept every lent IRQ and verify each accepted
 * virq maps to the same hwirq as the locally expected one.
 * @vm: base VM context (embedded in struct sde_vm_trusted)
 *
 * n_irq_lent is incremented per successful hh_irq_accept() so that a
 * partial failure can be rolled back by _sde_vm_release_irq(), which
 * walks exactly that many entries.
 *
 * Return: 0 on success, -EINVAL on any accept or hwirq-mismatch failure.
 */
static int _sde_vm_accept_irq(struct sde_vm *vm)
{
	struct sde_vm_trusted *sde_vm;
	struct sde_vm_irq_desc *irq_desc;
	struct irq_data *exp_irq_data, *acc_irq_data;
	int accepted_irq, expected_irq;
	int rc = 0, i;

	sde_vm = to_vm_trusted(vm);
	irq_desc = sde_vm->irq_desc;

	for (i = 0; i < irq_desc->n_irq; i++) {
		struct sde_vm_irq_entry *irq_entry = &irq_desc->irq_entries[i];

		expected_irq = irq_entry->irq;
		/* -1: let the hypervisor driver pick the virq number. */
		accepted_irq = hh_irq_accept(irq_entry->label, -1,
				IRQ_TYPE_LEVEL_HIGH);
		if (accepted_irq < 0) {
			SDE_ERROR("failed to accept irq for label: %d\n",
					irq_entry->label);
			rc = -EINVAL;
			goto end;
		}

		/* Count it as held even if the checks below fail. */
		atomic_inc(&sde_vm->base.n_irq_lent);

		exp_irq_data = irq_get_irq_data(expected_irq);
		if (!exp_irq_data) {
			SDE_ERROR("failed to get irq data for irq: %d\n",
					expected_irq);
			rc = -EINVAL;
			goto end;
		}

		acc_irq_data = irq_get_irq_data(accepted_irq);
		if (!acc_irq_data) {
			SDE_ERROR("failed to get irq data for irq: %d\n",
					accepted_irq);
			rc = -EINVAL;
			goto end;
		}

		/* Both virqs must resolve to the same physical interrupt. */
		if (exp_irq_data->hwirq != acc_irq_data->hwirq) {
			SDE_ERROR("IRQ mismatch on ACCEPT for label %d\n",
					irq_entry->label);
			rc = -EINVAL;
			goto end;
		}

		SDE_INFO("IRQ accept succeeded for label %u irq: %lu\n",
				irq_entry->label, exp_irq_data->hwirq);
	}
end:
	return rc;
}
  301. static int _sde_vm_accept(struct sde_kms *kms)
  302. {
  303. int rc = 0;
  304. rc = _sde_vm_accept_mem(kms->vm);
  305. if (rc)
  306. goto res_accept_fail;
  307. rc = _sde_vm_accept_irq(kms->vm);
  308. if (rc)
  309. goto res_accept_fail;
  310. return 0;
  311. res_accept_fail:
  312. _sde_vm_release_irq(kms->vm);
  313. _sde_vm_release_mem(kms->vm);
  314. return rc;
  315. }
  316. static void _sde_vm_set_ops(struct sde_vm_ops *ops)
  317. {
  318. memset(ops, 0, sizeof(*ops));
  319. ops->vm_client_pre_release = sde_vm_pre_release;
  320. ops->vm_client_post_acquire = sde_vm_post_acquire;
  321. ops->vm_release = _sde_vm_release;
  322. ops->vm_acquire = _sde_vm_accept;
  323. ops->vm_owns_hw = _sde_vm_owns_hw;
  324. ops->vm_deinit = _sde_vm_deinit;
  325. ops->vm_prepare_commit = sde_kms_vm_trusted_prepare_commit;
  326. ops->vm_post_commit = sde_kms_vm_trusted_post_commit;
  327. ops->vm_request_valid = sde_vm_request_valid;
  328. ops->vm_acquire_fail_handler = _sde_vm_release;
  329. ops->vm_msg_send = sde_vm_msg_send;
  330. ops->vm_resource_init = _sde_vm_resource_init;
  331. }
  332. int sde_vm_trusted_init(struct sde_kms *kms)
  333. {
  334. struct sde_vm_trusted *sde_vm;
  335. void *cookie;
  336. int rc = 0;
  337. sde_vm = kzalloc(sizeof(*sde_vm), GFP_KERNEL);
  338. if (!sde_vm)
  339. return -ENOMEM;
  340. _sde_vm_set_ops(&sde_vm->base.vm_ops);
  341. sde_vm->base.sde_kms = kms;
  342. mutex_init(&sde_vm->base.vm_res_lock);
  343. /**
  344. * Optimize resource population by reading the entire HW resource
  345. * space once during init. Once trusted vm starts supporting
  346. * per-display space assignment, this read has to be done on each event
  347. * notification.
  348. */
  349. rc = _sde_vm_populate_res(kms, sde_vm);
  350. if (rc) {
  351. SDE_ERROR("failed to populate trusted vm res, rc= %d\n", rc);
  352. goto init_fail;
  353. }
  354. cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_DISPLAY,
  355. sde_vm_mem_lend_notification_handler, sde_vm);
  356. if (!cookie) {
  357. SDE_ERROR("fails to register RM mem lend notifier\n");
  358. goto init_fail;
  359. }
  360. sde_vm->base.mem_notification_cookie = cookie;
  361. rc = hh_irq_wait_for_lend_v2(HH_IRQ_LABEL_SDE, HH_PRIMARY_VM,
  362. sde_vm_irq_lend_notification_handler,
  363. (void *)sde_vm);
  364. if (rc) {
  365. SDE_ERROR("wait for irq lend on label: %d failed, rc=%d\n",
  366. HH_IRQ_LABEL_SDE, rc);
  367. goto init_fail;
  368. }
  369. kms->vm = &sde_vm->base;
  370. atomic_set(&sde_vm->base.n_irq_lent, 0);
  371. rc = sde_vm_msgq_init(kms->vm);
  372. if (rc) {
  373. SDE_ERROR("failed to initialize the msgq, rc=%d\n", rc);
  374. goto init_fail;
  375. }
  376. return 0;
  377. init_fail:
  378. _sde_vm_deinit(kms, &sde_vm->base.vm_ops);
  379. return rc;
  380. }