// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/list_sort.h>
#include "linux/sde_rsc.h"
#include "dsi/dsi_display.h"
#include "dp/dp_display.h"
#include "sde_kms.h"
#include "sde_vm_common.h"
#include "sde_crtc.h"
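
/**
 * sde_vm_populate_vmid - allocate a single-entry VMID notification descriptor
 * @vmid: VM ID to place in the only entry of the descriptor
 *
 * Return: descriptor on success, ERR_PTR(-ENOMEM) on allocation failure.
 * The caller owns the returned memory.
 */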
struct hh_notify_vmid_desc *sde_vm_populate_vmid(hh_vmid_t vmid)
{
        struct hh_notify_vmid_desc *vmid_desc;

        vmid_desc = kzalloc(offsetof(struct hh_notify_vmid_desc,
                        vmid_entries[1]), GFP_KERNEL);
        if (!vmid_desc)
                return ERR_PTR(-ENOMEM);

        vmid_desc->n_vmid_entries = 1;
        vmid_desc->vmid_entries[0].vmid = vmid;

        return vmid_desc;
}
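
/**
 * sde_vm_populate_acl - build a one-entry access-control list for a VM
 * @vm_name: VM whose VMID is resolved via hh_rm_get_vmid()
 *
 * The single entry grants read and write permissions (HH_RM_ACL_R |
 * HH_RM_ACL_W) to the resolved VMID.
 *
 * Return: ACL descriptor on success, ERR_PTR(-ENOMEM) on allocation failure.
 */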
struct hh_acl_desc *sde_vm_populate_acl(enum hh_vm_names vm_name)
{
        struct hh_acl_desc *acl_desc;
        hh_vmid_t vmid;

        hh_rm_get_vmid(vm_name, &vmid);

        acl_desc = kzalloc(offsetof(struct hh_acl_desc, acl_entries[1]),
                        GFP_KERNEL);
        if (!acl_desc)
                return ERR_PTR(-ENOMEM);

        acl_desc->n_acl_entries = 1;
        acl_desc->acl_entries[0].vmid = vmid;
        acl_desc->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;

        return acl_desc;
}
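
/**
 * __mem_sort_cmp - list_sort() comparator ordering io memory entries by base
 * @priv: unused
 * @a: first list node, embedded in a struct msm_io_mem_entry
 * @b: second list node, embedded in a struct msm_io_mem_entry
 *
 * Return: negative, zero or positive when @a sorts before, equal to or
 * after @b.
 */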
int __mem_sort_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct msm_io_mem_entry *left =
                container_of(a, struct msm_io_mem_entry, list);
        struct msm_io_mem_entry *right =
                container_of(b, struct msm_io_mem_entry, list);

        /* avoid truncating a phys_addr_t difference to int */
        if (left->base < right->base)
                return -1;
        if (left->base > right->base)
                return 1;

        return 0;
}
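
/**
 * __merge_on_overlap - merge two ranges when the right one touches the left
 * @res: output entry receiving the merged range
 * @left: lower range (the list is already sorted by base address)
 * @right: following range
 *
 * Return: true and fill @res with the union of both ranges when @right
 * starts at or before the end of @left, false otherwise.
 */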
bool __merge_on_overlap(struct msm_io_mem_entry *res,
                const struct msm_io_mem_entry *left,
                const struct msm_io_mem_entry *right)
{
        phys_addr_t l_s = left->base;
        phys_addr_t l_e = left->base + left->size;
        phys_addr_t r_s = right->base;
        phys_addr_t r_e = right->base + right->size;

        memset(res, 0, sizeof(*res));

        if (r_s <= l_e) {
                res->base = min(l_s, r_s);
                res->size = max(l_e, r_e) - res->base;

                return true;
        }

        return false;
}
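
/**
 * _sde_vm_sort_and_align - page-align, sort and coalesce an io memory list
 * @mem: list of struct msm_io_mem_entry nodes
 *
 * Each entry is expanded to page boundaries, the list is sorted by base
 * address and adjacent or overlapping entries are merged in place.
 */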
void _sde_vm_sort_and_align(struct list_head *mem)
{
        struct msm_io_mem_entry *entry, *tmp, *prev = NULL;
        struct msm_io_mem_entry merged_entry;

        list_for_each_entry(entry, mem, list) {
                entry->base = ALIGN_DOWN(entry->base, PAGE_SIZE);
                entry->size = ALIGN(entry->size, PAGE_SIZE);
        }

        list_sort(NULL, mem, __mem_sort_cmp);

        list_for_each_entry_safe(entry, tmp, mem, list) {
                if (prev && __merge_on_overlap(&merged_entry, prev, entry)) {
                        prev->base = merged_entry.base;
                        prev->size = merged_entry.size;

                        list_del(&entry->list);
                        entry = prev;
                }

                prev = entry;
        }

        list_for_each_entry(entry, mem, list)
                SDE_DEBUG("base: 0x%llx - size: 0x%llx\n",
                                (u64)entry->base, (u64)entry->size);
}
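
/**
 * sde_vm_populate_sgl - convert the io memory list into an SGL descriptor
 * @io_res: io resource holding the memory list
 *
 * The memory list is aligned and coalesced first, then copied into a newly
 * allocated hh_sgl_desc. The source list is freed via msm_dss_clean_io_mem().
 *
 * Return: SGL descriptor on success, ERR_PTR(-ENOMEM) on allocation failure.
 */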
struct hh_sgl_desc *sde_vm_populate_sgl(struct msm_io_res *io_res)
{
        struct hh_sgl_desc *sgl_desc;
        struct msm_io_mem_entry *mem;
        u32 i = 0, num_mem_entry = 0;

        _sde_vm_sort_and_align(&io_res->mem);

        list_for_each_entry(mem, &io_res->mem, list)
                num_mem_entry++;

        sgl_desc = kzalloc(offsetof(struct hh_sgl_desc,
                        sgl_entries[num_mem_entry]), GFP_KERNEL);
        if (!sgl_desc)
                return ERR_PTR(-ENOMEM);

        sgl_desc->n_sgl_entries = num_mem_entry;

        list_for_each_entry(mem, &io_res->mem, list) {
                sgl_desc->sgl_entries[i].ipa_base = mem->base;
                sgl_desc->sgl_entries[i].size = mem->size;
                i++;
        }

        msm_dss_clean_io_mem(&io_res->mem);

        return sgl_desc;
}
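
/**
 * sde_vm_populate_irq - convert the io irq list into an irq descriptor
 * @io_res: io resource holding the irq list
 *
 * The source irq list is freed via msm_dss_clean_io_irq() once the entries
 * have been copied.
 *
 * Return: irq descriptor on success, ERR_PTR(-ENOMEM) on allocation failure.
 */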
struct sde_vm_irq_desc *sde_vm_populate_irq(struct msm_io_res *io_res)
{
        struct msm_io_irq_entry *irq;
        u32 i = 0, num_irq = 0;
        struct sde_vm_irq_desc *irq_desc;

        list_for_each_entry(irq, &io_res->irq, list)
                num_irq++;

        irq_desc = kzalloc(sizeof(*irq_desc), GFP_KERNEL);
        if (!irq_desc)
                return ERR_PTR(-ENOMEM);

        irq_desc->irq_entries = kcalloc(num_irq,
                        sizeof(struct sde_vm_irq_entry),
                        GFP_KERNEL);
        if (!irq_desc->irq_entries) {
                sde_vm_free_irq(irq_desc);
                return ERR_PTR(-ENOMEM);
        }

        list_for_each_entry(irq, &io_res->irq, list) {
                struct sde_vm_irq_entry *entry = &irq_desc->irq_entries[i];

                entry->irq = irq->irq_num;
                entry->label = irq->label;
                i++;
        }

        irq_desc->n_irq = num_irq;

        msm_dss_clean_io_irq(&io_res->irq);

        return irq_desc;
}
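
/**
 * sde_vm_free_irq - free an irq descriptor and its entry array
 * @irq_desc: descriptor returned by sde_vm_populate_irq(), may be NULL
 */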
void sde_vm_free_irq(struct sde_vm_irq_desc *irq_desc)
{
        if (!irq_desc)
                return;

        kfree(irq_desc->irq_entries);
        kfree(irq_desc);
}
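
/**
 * sde_vm_get_resources - collect io resources from KMS and all VM clients
 * @sde_kms: handle to the kms instance
 * @io_res: io resource to populate
 *
 * On any failure the partially collected memory and irq lists are cleaned up.
 *
 * Return: 0 on success, error code from the failing callback otherwise.
 */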
int sde_vm_get_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
        struct msm_drm_private *priv = sde_kms->dev->dev_private;
        struct msm_vm_client_entry *entry;
        int rc = 0;

        rc = sde_kms_get_io_resources(sde_kms, io_res);
        if (rc)
                goto fail_get_res;

        list_for_each_entry(entry, &priv->vm_client_list, list) {
                if (!entry->ops.vm_get_io_resources)
                        continue;

                rc = entry->ops.vm_get_io_resources(io_res, entry->data);
                if (rc) {
                        SDE_ERROR("get_io_resources failed for device: %d\n",
                                        entry->dev->id);
                        goto fail_get_res;
                }
        }

        return rc;

fail_get_res:
        msm_dss_clean_io_mem(&io_res->mem);
        msm_dss_clean_io_irq(&io_res->irq);

        return rc;
}
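
/**
 * sde_vm_free_resources - release the memory and irq lists of an io resource
 * @io_res: io resource populated by sde_vm_get_resources()
 */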
void sde_vm_free_resources(struct msm_io_res *io_res)
{
        msm_dss_clean_io_mem(&io_res->mem);
        msm_dss_clean_io_irq(&io_res->irq);
}
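
/**
 * sde_vm_post_acquire - notify registered VM clients that HW was acquired
 * @kms: handle to the kms instance
 *
 * Walks priv->vm_client_list calling vm_post_hw_acquire(); on failure the
 * clients already notified are rolled back through vm_pre_hw_release().
 *
 * Return: 0 on success, error code from the failing callback otherwise.
 */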
int sde_vm_post_acquire(struct sde_kms *kms)
{
        struct msm_drm_private *priv = kms->dev->dev_private;
        struct msm_vm_client_entry *entry;
        int rc = 0;

        list_for_each_entry(entry, &priv->vm_client_list, list) {
                if (!entry->ops.vm_post_hw_acquire)
                        continue;

                rc = entry->ops.vm_post_hw_acquire(entry->data);
                if (rc) {
                        SDE_ERROR("post_acquire failed for device: %d\n",
                                        entry->dev->id);
                        goto post_acquire_rollback;
                }
        }

        return rc;

post_acquire_rollback:
        list_for_each_entry_continue_reverse(entry, &priv->vm_client_list,
                        list) {
                if (!entry->ops.vm_pre_hw_release)
                        continue;

                rc = entry->ops.vm_pre_hw_release(entry->data);
                if (rc) {
                        SDE_ERROR(
                                "pre_release failed during rollback for device: %d\n",
                                entry->dev->id);
                        break;
                }
        }

        return rc;
}
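
/**
 * sde_vm_pre_release - notify registered VM clients that HW will be released
 * @kms: handle to the kms instance
 *
 * Walks priv->vm_client_list calling vm_pre_hw_release(); on failure the
 * clients already notified are rolled back through vm_post_hw_acquire().
 *
 * Return: 0 on success, error code from the failing callback otherwise.
 */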
int sde_vm_pre_release(struct sde_kms *kms)
{
        struct msm_drm_private *priv = kms->dev->dev_private;
        struct msm_vm_client_entry *entry;
        int rc = 0;

        list_for_each_entry(entry, &priv->vm_client_list, list) {
                if (!entry->ops.vm_pre_hw_release)
                        continue;

                rc = entry->ops.vm_pre_hw_release(entry->data);
                if (rc) {
                        SDE_ERROR("pre_release failed for device: %d\n",
                                        entry->dev->id);
                        goto pre_release_rollback;
                }
        }

        return rc;

pre_release_rollback:
        list_for_each_entry_continue_reverse(entry, &priv->vm_client_list,
                        list) {
                if (!entry->ops.vm_post_hw_acquire)
                        continue;

                rc = entry->ops.vm_post_hw_acquire(entry->data);
                if (rc) {
                        SDE_ERROR(
                                "post_acquire failed during rollback for device: %d\n",
                                entry->dev->id);
                        break;
                }
        }

        return rc;
}
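
/**
 * sde_vm_request_valid - validate a VM ownership state transition
 * @sde_kms: handle to the kms instance
 * @old_state: current VM request state
 * @new_state: requested VM request state
 *
 * A RELEASE or NONE request is only valid while this VM owns the HW and the
 * previous request was not already a release. An ACQUIRE request is only
 * valid after a release; when the HW is not currently owned, vm_acquire()
 * is invoked to claim it.
 *
 * Return: 0 when the transition is allowed, -EINVAL or the vm_acquire()
 * error code otherwise.
 */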
int sde_vm_request_valid(struct sde_kms *sde_kms,
                enum sde_crtc_vm_req old_state,
                enum sde_crtc_vm_req new_state)
{
        struct sde_vm_ops *vm_ops;
        int rc = 0;

        vm_ops = &sde_kms->vm->vm_ops;

        switch (new_state) {
        case VM_REQ_RELEASE:
        case VM_REQ_NONE:
                if ((old_state == VM_REQ_RELEASE) ||
                    !vm_ops->vm_owns_hw(sde_kms))
                        rc = -EINVAL;
                break;
        case VM_REQ_ACQUIRE:
                if (old_state != VM_REQ_RELEASE) {
                        rc = -EINVAL;
                } else if (!vm_ops->vm_owns_hw(sde_kms)) {
                        if (vm_ops->vm_acquire)
                                rc = vm_ops->vm_acquire(sde_kms);
                        else
                                rc = -EINVAL;
                }
                break;
        default:
                SDE_ERROR("invalid vm request\n");
                rc = -EINVAL;
        }

        SDE_DEBUG("old req: %d new req: %d owns_hw: %d, rc: %d\n",
                        old_state, new_state,
                        vm_ops->vm_owns_hw(sde_kms), rc);
        SDE_EVT32(old_state, new_state, vm_ops->vm_owns_hw(sde_kms), rc);

        return rc;
}