// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/list_sort.h>

#include "linux/sde_rsc.h"
#include "dsi/dsi_display.h"
#include "dp/dp_display.h"
#include "sde_kms.h"
#include "sde_vm_common.h"
#include "sde_crtc.h"
#include "sde_vm_msgq.h"
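
/**
 * sde_vm_populate_vmid - allocate a VMID notification descriptor for one VM
 * @vmid: VMID to place in the descriptor
 *
 * Allocates a struct hh_notify_vmid_desc sized for a single entry and fills
 * it with @vmid. The caller owns the returned descriptor and must kfree() it.
 *
 * Return: descriptor pointer on success, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */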
struct hh_notify_vmid_desc *sde_vm_populate_vmid(hh_vmid_t vmid)
{
	struct hh_notify_vmid_desc *vmid_desc;

	/* flexible-array allocation sized for exactly one VMID entry */
	vmid_desc = kzalloc(offsetof(struct hh_notify_vmid_desc,
			vmid_entries[1]), GFP_KERNEL);
	if (!vmid_desc)
		return ERR_PTR(-ENOMEM);

	vmid_desc->n_vmid_entries = 1;
	vmid_desc->vmid_entries[0].vmid = vmid;

	return vmid_desc;
}
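
/**
 * sde_vm_populate_acl - build a single-entry ACL descriptor for a VM
 * @vm_name: VM whose VMID is looked up via hh_rm_get_vmid()
 *
 * Grants read and write permission (HH_RM_ACL_R | HH_RM_ACL_W) to the VM.
 * Note: the hh_rm_get_vmid() return value is not checked here; the lookup
 * is assumed to succeed. The caller owns the returned descriptor and must
 * kfree() it.
 *
 * Return: descriptor pointer on success, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */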
struct hh_acl_desc *sde_vm_populate_acl(enum hh_vm_names vm_name)
{
	struct hh_acl_desc *acl_desc;
	hh_vmid_t vmid;

	hh_rm_get_vmid(vm_name, &vmid);

	acl_desc = kzalloc(offsetof(struct hh_acl_desc, acl_entries[1]),
			GFP_KERNEL);
	if (!acl_desc)
		return ERR_PTR(-ENOMEM);

	acl_desc->n_acl_entries = 1;
	acl_desc->acl_entries[0].vmid = vmid;
	acl_desc->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;

	return acl_desc;
}
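
/**
 * __mem_sort_cmp - list_sort() comparator ordering I/O ranges by base
 *
 * Sorts the msm_io_mem_entry list in ascending order of ->base so that the
 * merge pass in _sde_vm_sort_and_align() only needs to compare neighbours.
 */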
int __mem_sort_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct msm_io_mem_entry *left =
		container_of(a, struct msm_io_mem_entry, list);
	struct msm_io_mem_entry *right =
		container_of(b, struct msm_io_mem_entry, list);

	/*
	 * Compare instead of subtracting: phys_addr_t may be 64-bit, and
	 * truncating the difference to int could invert the ordering.
	 */
	return (left->base > right->base) - (left->base < right->base);
}
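
/**
 * __merge_on_overlap - coalesce two sorted ranges that overlap or abut
 * @res: filled with the merged range on success
 * @left: lower-based range (the list is sorted, so left->base <= right->base)
 * @right: higher-based range
 *
 * Example: left = {base 0x1000, size 0x2000} and right = {base 0x2000,
 * size 0x1000} both end at 0x3000 and merge into {base 0x1000, size 0x2000}.
 *
 * Return: true with *res populated when merged, false otherwise.
 */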
bool __merge_on_overlap(struct msm_io_mem_entry *res,
		const struct msm_io_mem_entry *left,
		const struct msm_io_mem_entry *right)
{
	phys_addr_t l_s = left->base;
	phys_addr_t l_e = left->base + left->size;
	phys_addr_t r_s = right->base;
	phys_addr_t r_e = right->base + right->size;

	memset(res, 0, sizeof(*res));

	if (r_s <= l_e) {
		res->base = min(l_s, r_s);
		res->size = max(l_e, r_e) - res->base;
		return true;
	}

	return false;
}
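
/**
 * _sde_vm_sort_and_align - normalize an I/O memory list for sharing
 * @mem: list of struct msm_io_mem_entry
 *
 * Expands every range to page granularity (base rounded down, size rounded
 * up), sorts the list by base address, then merges overlapping or adjacent
 * neighbours in a single pass so the resulting SGL carries no duplicate
 * pages.
 */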
void _sde_vm_sort_and_align(struct list_head *mem)
{
	struct msm_io_mem_entry *entry, *tmp, *prev = NULL;
	struct msm_io_mem_entry merged_entry;

	list_for_each_entry(entry, mem, list) {
		entry->base = ALIGN_DOWN(entry->base, PAGE_SIZE);
		entry->size = ALIGN(entry->size, PAGE_SIZE);
	}

	list_sort(NULL, mem, __mem_sort_cmp);

	list_for_each_entry_safe(entry, tmp, mem, list) {
		if (prev && __merge_on_overlap(&merged_entry, prev, entry)) {
			prev->base = merged_entry.base;
			prev->size = merged_entry.size;

			list_del(&entry->list);
			entry = prev;
		}
		prev = entry;
	}

	list_for_each_entry(entry, mem, list)
		SDE_DEBUG("base: 0x%llx - size: 0x%llx\n",
				entry->base, entry->size);
}
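
/**
 * sde_vm_populate_sgl - convert an I/O memory list into an SGL descriptor
 * @io_res: resource container whose ->mem list is consumed
 *
 * Sorts, aligns and merges the memory list, copies each remaining range into
 * a freshly allocated struct hh_sgl_desc, then frees the source list via
 * msm_dss_clean_io_mem(). The caller owns the returned descriptor.
 *
 * Return: descriptor pointer on success, ERR_PTR(-ENOMEM) on allocation
 * failure (the already-aligned source list is not freed in that case).
 */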
struct hh_sgl_desc *sde_vm_populate_sgl(struct msm_io_res *io_res)
{
	struct hh_sgl_desc *sgl_desc;
	struct msm_io_mem_entry *mem;
	u32 i = 0, num_mem_entry = 0;

	_sde_vm_sort_and_align(&io_res->mem);

	list_for_each_entry(mem, &io_res->mem, list)
		num_mem_entry++;

	sgl_desc = kzalloc(offsetof(struct hh_sgl_desc,
			sgl_entries[num_mem_entry]), GFP_KERNEL);
	if (!sgl_desc)
		return ERR_PTR(-ENOMEM);

	sgl_desc->n_sgl_entries = num_mem_entry;

	list_for_each_entry(mem, &io_res->mem, list) {
		sgl_desc->sgl_entries[i].ipa_base = mem->base;
		sgl_desc->sgl_entries[i].size = mem->size;
		i++;
	}

	msm_dss_clean_io_mem(&io_res->mem);

	return sgl_desc;
}
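
/**
 * sde_vm_populate_irq - convert an IRQ list into an sde_vm_irq_desc
 * @io_res: resource container whose ->irq list is consumed
 *
 * Counts the IRQ entries, allocates a descriptor plus a matching entry
 * array, copies each entry's IRQ number and label, then frees the source
 * list via msm_dss_clean_io_irq(). Release the result with
 * sde_vm_free_irq().
 *
 * Return: descriptor pointer on success, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */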
struct sde_vm_irq_desc *sde_vm_populate_irq(struct msm_io_res *io_res)
{
	struct msm_io_irq_entry *irq;
	u32 i = 0, num_irq = 0;
	struct sde_vm_irq_desc *irq_desc;

	list_for_each_entry(irq, &io_res->irq, list)
		num_irq++;

	irq_desc = kzalloc(sizeof(*irq_desc), GFP_KERNEL);
	if (!irq_desc)
		return ERR_PTR(-ENOMEM);

	irq_desc->irq_entries = kcalloc(num_irq,
			sizeof(struct sde_vm_irq_entry),
			GFP_KERNEL);
	if (!irq_desc->irq_entries) {
		sde_vm_free_irq(irq_desc);
		return ERR_PTR(-ENOMEM);
	}

	list_for_each_entry(irq, &io_res->irq, list) {
		struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];

		entry->irq = irq->irq_num;
		entry->label = irq->label;
		i++;
	}

	irq_desc->n_irq = num_irq;

	msm_dss_clean_io_irq(&io_res->irq);

	return irq_desc;
}
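
/**
 * sde_vm_free_irq - release a descriptor built by sde_vm_populate_irq()
 * @irq_desc: descriptor to free; NULL is a no-op
 */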
void sde_vm_free_irq(struct sde_vm_irq_desc *irq_desc)
{
	if (!irq_desc)
		return;

	/* kfree() tolerates a NULL entry array */
	kfree(irq_desc->irq_entries);
	kfree(irq_desc);
}
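
/**
 * sde_vm_get_resources - collect I/O resources from the KMS and VM clients
 * @sde_kms: KMS instance queried via sde_kms_get_io_resources()
 * @io_res: output container for the aggregated memory and IRQ lists
 *
 * Walks every entry registered on priv->vm_client_list and lets each client
 * append its own resources. On any failure both lists are cleaned, so the
 * caller never sees a partially populated @io_res.
 *
 * Return: 0 on success, negative error code otherwise.
 */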
int sde_vm_get_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;
	struct msm_vm_client_entry *entry;
	int rc = 0;

	rc = sde_kms_get_io_resources(sde_kms, io_res);
	if (rc)
		goto fail_get_res;

	list_for_each_entry(entry, &priv->vm_client_list, list) {
		if (!entry->ops.vm_get_io_resources)
			continue;

		rc = entry->ops.vm_get_io_resources(io_res, entry->data);
		if (rc) {
			SDE_ERROR("get_io_resources failed for device: %d\n",
					entry->dev->id);
			goto fail_get_res;
		}
	}

	return rc;

fail_get_res:
	msm_dss_clean_io_mem(&io_res->mem);
	msm_dss_clean_io_irq(&io_res->irq);

	return rc;
}
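
/**
 * sde_vm_free_resources - drop both resource lists held in @io_res
 * @io_res: container whose ->mem and ->irq lists are freed
 */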
void sde_vm_free_resources(struct msm_io_res *io_res)
{
	msm_dss_clean_io_mem(&io_res->mem);
	msm_dss_clean_io_irq(&io_res->irq);
}
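
/**
 * sde_vm_post_acquire - notify VM clients that the HW has been acquired
 * @kms: KMS instance whose client list is walked
 *
 * Calls each client's vm_post_hw_acquire hook in registration order. If a
 * hook fails, the clients already notified are rolled back in reverse order
 * through their vm_pre_hw_release hooks.
 *
 * Return: 0 on success, negative error code otherwise.
 */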
int sde_vm_post_acquire(struct sde_kms *kms)
{
	struct msm_drm_private *priv = kms->dev->dev_private;
	struct msm_vm_client_entry *entry;
	int rc = 0;

	list_for_each_entry(entry, &priv->vm_client_list, list) {
		if (!entry->ops.vm_post_hw_acquire)
			continue;

		rc = entry->ops.vm_post_hw_acquire(entry->data);
		if (rc) {
			SDE_ERROR("post_acquire failed for device: %d\n",
					entry->dev->id);
			goto post_acquire_rollback;
		}
	}

	return rc;

post_acquire_rollback:
	list_for_each_entry_continue_reverse(entry, &priv->vm_client_list,
			list) {
		if (!entry->ops.vm_pre_hw_release)
			continue;

		rc = entry->ops.vm_pre_hw_release(entry->data);
		if (rc) {
			SDE_ERROR(
				"pre_release failed during rollback for device: %d\n",
				entry->dev->id);
			break;
		}
	}

	return rc;
}
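
/**
 * sde_vm_pre_release - notify VM clients that the HW is about to be released
 * @kms: KMS instance whose client list is walked
 *
 * Mirror of sde_vm_post_acquire(): runs each client's vm_pre_hw_release
 * hook, and on failure re-acquires the already-released clients in reverse
 * order via vm_post_hw_acquire.
 *
 * Return: 0 on success, negative error code otherwise.
 */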
int sde_vm_pre_release(struct sde_kms *kms)
{
	struct msm_drm_private *priv = kms->dev->dev_private;
	struct msm_vm_client_entry *entry;
	int rc = 0;

	list_for_each_entry(entry, &priv->vm_client_list, list) {
		if (!entry->ops.vm_pre_hw_release)
			continue;

		rc = entry->ops.vm_pre_hw_release(entry->data);
		if (rc) {
			SDE_ERROR("pre_release failed for device: %d\n",
					entry->dev->id);
			goto pre_release_rollback;
		}
	}

	return rc;

pre_release_rollback:
	list_for_each_entry_continue_reverse(entry, &priv->vm_client_list,
			list) {
		if (!entry->ops.vm_post_hw_acquire)
			continue;

		rc = entry->ops.vm_post_hw_acquire(entry->data);
		if (rc) {
			SDE_ERROR(
				"post_acquire failed during rollback for device: %d\n",
				entry->dev->id);
			break;
		}
	}

	return rc;
}
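
/**
 * sde_vm_request_valid - validate a VM ownership state transition
 * @sde_kms: KMS instance, used to query vm_owns_hw()
 * @old_state: current sde_crtc_vm_req state
 * @new_state: requested sde_crtc_vm_req state
 *
 * RELEASE/NONE requests require that the HW is currently owned and was not
 * already released; ACQUIRE is only valid coming from RELEASE.
 *
 * Return: 0 when the transition is allowed, -EINVAL otherwise.
 */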
int sde_vm_request_valid(struct sde_kms *sde_kms,
		enum sde_crtc_vm_req old_state,
		enum sde_crtc_vm_req new_state)
{
	struct sde_vm_ops *vm_ops;
	int rc = 0;

	vm_ops = &sde_kms->vm->vm_ops;

	switch (new_state) {
	case VM_REQ_RELEASE:
	case VM_REQ_NONE:
		if ((old_state == VM_REQ_RELEASE) ||
				!vm_ops->vm_owns_hw(sde_kms))
			rc = -EINVAL;
		break;
	case VM_REQ_ACQUIRE:
		if (old_state != VM_REQ_RELEASE)
			rc = -EINVAL;
		break;
	default:
		SDE_ERROR("invalid vm request\n");
		rc = -EINVAL;
		break;
	}

	SDE_DEBUG("old req: %d new req: %d owns_hw: %d, rc: %d\n",
			old_state, new_state,
			vm_ops->vm_owns_hw(sde_kms), rc);
	SDE_EVT32(old_state, new_state, vm_ops->vm_owns_hw(sde_kms), rc);

	return rc;
}
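
/**
 * sde_vm_msg_send - send a message over the SDE VM message queue
 * @sde_vm: VM context; rejected with -EINVAL when NULL
 * @msg: message payload
 * @msg_size: payload size in bytes
 *
 * Thin NULL-guard around sde_vm_msgq_send().
 *
 * Return: result of sde_vm_msgq_send(), or -EINVAL for a NULL @sde_vm.
 */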
int sde_vm_msg_send(struct sde_vm *sde_vm, void *msg, size_t msg_size)
{
	if (!sde_vm)
		return -EINVAL;

	return sde_vm_msgq_send(sde_vm, msg, msg_size);
}