/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _GUNYAH_RSC_MGR_H
#define _GUNYAH_RSC_MGR_H

#include <linux/android_vendor.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/gunyah.h>

#define GH_VMID_INVAL		U16_MAX
#define GH_MEM_HANDLE_INVAL	U32_MAX

struct gh_rm;
int gh_rm_call(void *rm, u32 message_id, const void *req_buf, size_t req_buf_size,
	       void **resp_buf, size_t *resp_buf_size);
int gh_rm_notifier_register(void *rm, struct notifier_block *nb);
int gh_rm_notifier_unregister(void *rm, struct notifier_block *nb);
struct device *gh_rm_get(struct gh_rm *rm);
void gh_rm_put(struct gh_rm *rm);
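
/*
 * Usage sketch (illustrative only, not part of this API): issue a raw RM RPC
 * while holding a reference on the resource manager device. MY_MSG_ID and
 * struct my_req are hypothetical placeholders, and it is assumed here that a
 * successful call hands ownership of the response buffer to the caller.
 *
 *	struct device *dev = gh_rm_get(rm);
 *	struct my_req req = { 0 };
 *	void *resp;
 *	size_t resp_size;
 *	int ret;
 *
 *	ret = gh_rm_call(rm, MY_MSG_ID, &req, sizeof(req), &resp, &resp_size);
 *	if (!ret)
 *		kfree(resp);
 *	gh_rm_put(rm);
 */
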
struct gh_rm_vm_exited_payload {
	__le16 vmid;
	__le16 exit_type;
	__le32 exit_reason_size;
	u8 exit_reason[];
} __packed;

#define GH_RM_NOTIFICATION_VM_EXITED		0x56100001

enum gh_rm_vm_status {
	GH_RM_VM_STATUS_NO_STATE	= 0,
	GH_RM_VM_STATUS_INIT		= 1,
	GH_RM_VM_STATUS_READY		= 2,
	GH_RM_VM_STATUS_RUNNING		= 3,
	GH_RM_VM_STATUS_PAUSED		= 4,
	GH_RM_VM_STATUS_LOAD		= 5,
	GH_RM_VM_STATUS_AUTH		= 6,
	GH_RM_VM_STATUS_INIT_FAILED	= 8,
	GH_RM_VM_STATUS_EXITED		= 9,
	GH_RM_VM_STATUS_RESETTING	= 10,
	GH_RM_VM_STATUS_RESET		= 11,
};

struct gh_rm_vm_status_payload {
	__le16 vmid;
	u16 reserved;
	u8 vm_status;
	u8 os_status;
	__le16 app_status;
} __packed;

#define GH_RM_NOTIFICATION_VM_STATUS		0x56100008
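
/*
 * Example notifier (illustrative sketch): this assumes the RM notifier chain
 * passes the notification ID as @action and a pointer to the matching payload
 * as @data, so a handler registered with gh_rm_notifier_register() could
 * decode VM lifecycle events like this:
 *
 *	static int my_vm_nb(struct notifier_block *nb, unsigned long action,
 *			    void *data)
 *	{
 *		if (action == GH_RM_NOTIFICATION_VM_EXITED) {
 *			struct gh_rm_vm_exited_payload *exited = data;
 *
 *			pr_info("VM %u exited\n", le16_to_cpu(exited->vmid));
 *		} else if (action == GH_RM_NOTIFICATION_VM_STATUS) {
 *			struct gh_rm_vm_status_payload *status = data;
 *
 *			if (status->vm_status == GH_RM_VM_STATUS_READY)
 *				pr_info("VM %u ready\n", le16_to_cpu(status->vmid));
 *		}
 *		return NOTIFY_OK;
 *	}
 */
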
#define GH_RM_ACL_X		BIT(0)
#define GH_RM_ACL_W		BIT(1)
#define GH_RM_ACL_R		BIT(2)

struct gh_rm_mem_acl_entry {
	__le16 vmid;
	u8 perms;
	u8 reserved;
} __packed;

struct gh_rm_mem_entry {
	__le64 phys_addr;
	__le64 size;
} __packed;

enum gh_rm_mem_type {
	GH_RM_MEM_TYPE_NORMAL	= 0,
	GH_RM_MEM_TYPE_IO	= 1,
};

/**
 * struct gh_rm_mem_parcel - Info about memory to be lent/shared/donated/reclaimed
 * @mem_type: The type of memory: normal (DDR) or IO
 * @label: A client-specified identifier which can be used by the other VMs to identify the purpose
 *         of the memory parcel.
 * @n_acl_entries: The number of entries in the @acl_entries array.
 * @acl_entries: An array of access control entries. Each entry specifies a VM and what access
 *               is allowed for the memory parcel.
 * @n_mem_entries: The number of entries in the @mem_entries array.
 * @mem_entries: An array of regions to be associated with the memory parcel. Addresses should be
 *               (intermediate) physical addresses from Linux's perspective.
 * @mem_handle: On success, filled with the memory handle that the RM allocates for this memory parcel
 */
struct gh_rm_mem_parcel {
	enum gh_rm_mem_type mem_type;
	u32 label;
	size_t n_acl_entries;
	struct gh_rm_mem_acl_entry *acl_entries;
	size_t n_mem_entries;
	struct gh_rm_mem_entry *mem_entries;
	u32 mem_handle;
	ANDROID_BACKPORT_RESERVED(1);
	ANDROID_BACKPORT_RESERVED(2);
	ANDROID_BACKPORT_RESERVED(3);
	ANDROID_BACKPORT_RESERVED(4);
	ANDROID_BACKPORT_RESERVED(5);
	ANDROID_BACKPORT_RESERVED(6);
	ANDROID_BACKPORT_RESERVED(7);
	ANDROID_BACKPORT_RESERVED(8);
};

/* RPC Calls */
int gh_rm_mem_lend(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel);
int gh_rm_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel);
int gh_rm_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel);
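
/*
 * Example parcel setup (illustrative sketch): lend one physically contiguous
 * region to a single VM with read/write access, then reclaim it later. The
 * vmid, phys, and size values are hypothetical; fields the RM protocol
 * defines as little-endian are converted with cpu_to_le*().
 *
 *	struct gh_rm_mem_acl_entry acl = {
 *		.vmid = cpu_to_le16(vmid),
 *		.perms = GH_RM_ACL_R | GH_RM_ACL_W,
 *	};
 *	struct gh_rm_mem_entry entry = {
 *		.phys_addr = cpu_to_le64(phys),
 *		.size = cpu_to_le64(size),
 *	};
 *	struct gh_rm_mem_parcel parcel = {
 *		.mem_type = GH_RM_MEM_TYPE_NORMAL,
 *		.label = 0,
 *		.n_acl_entries = 1,
 *		.acl_entries = &acl,
 *		.n_mem_entries = 1,
 *		.mem_entries = &entry,
 *	};
 *	int ret = gh_rm_mem_lend(rm, &parcel);
 *
 * On success, parcel.mem_handle identifies the parcel until
 * gh_rm_mem_reclaim(rm, &parcel) returns the memory to this VM.
 */
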
int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid);
int gh_rm_dealloc_vmid(struct gh_rm *rm, u16 vmid);
int gh_rm_vm_reset(struct gh_rm *rm, u16 vmid);
int gh_rm_vm_start(struct gh_rm *rm, u16 vmid);
int gh_rm_vm_stop(struct gh_rm *rm, u16 vmid);
int gh_rm_vm_set_firmware_mem(struct gh_rm *rm, u16 vmid, struct gh_rm_mem_parcel *parcel,
			      u64 fw_offset, u64 fw_size);

enum gh_rm_vm_auth_mechanism {
	GH_RM_VM_AUTH_NONE		= 0,
	GH_RM_VM_AUTH_QCOM_PIL_ELF	= 1,
	GH_RM_VM_AUTH_QCOM_ANDROID_PVM	= 2,
};

int gh_rm_vm_configure(struct gh_rm *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism,
		       u32 mem_handle, u64 image_offset, u64 image_size,
		       u64 dtb_offset, u64 dtb_size);
int gh_rm_vm_init(struct gh_rm *rm, u16 vmid);
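
/*
 * Lifecycle sketch (one plausible ordering inferred from these prototypes,
 * not a documented contract): bring up a VM from a lent image parcel, then
 * tear it down again. Offsets and sizes are hypothetical; error handling is
 * elided.
 *
 *	ret = gh_rm_alloc_vmid(rm, vmid);
 *	ret = gh_rm_mem_lend(rm, &parcel);
 *	ret = gh_rm_vm_configure(rm, vmid, GH_RM_VM_AUTH_NONE, parcel.mem_handle,
 *				 image_offset, image_size, dtb_offset, dtb_size);
 *	ret = gh_rm_vm_init(rm, vmid);
 *	ret = gh_rm_vm_start(rm, vmid);
 *	...
 *	ret = gh_rm_vm_stop(rm, vmid);
 *	ret = gh_rm_vm_reset(rm, vmid);
 *	ret = gh_rm_mem_reclaim(rm, &parcel);
 *	ret = gh_rm_dealloc_vmid(rm, vmid);
 */
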
struct gh_rm_hyp_resource {
	u8 type;
	u8 reserved;
	__le16 partner_vmid;
	__le32 resource_handle;
	__le32 resource_label;
	__le64 cap_id;
	__le32 virq_handle;
#define GH_RM_RESOURCE_NO_VIRQ	0xFFFFFFFF
	__le32 virq;
	__le64 base;
	__le64 size;
} __packed;

struct gh_rm_hyp_resources {
	__le32 n_entries;
	struct gh_rm_hyp_resource entries[];
} __packed;

int gh_rm_get_hyp_resources(struct gh_rm *rm, u16 vmid,
			    struct gh_rm_hyp_resources **resources);
int gh_rm_get_vmid(struct gh_rm *rm, u16 *vmid);

struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_resource *hyp_resource);
void gh_rm_free_resource(struct gh_resource *ghrsc);
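
/*
 * Example (illustrative sketch): walk the resource list reported for a VM and
 * turn each entry into a struct gh_resource. This assumes the buffer returned
 * through @resources is owned by the caller and released with kfree() when
 * done; the per-entry handling is elided.
 *
 *	struct gh_rm_hyp_resources *res;
 *	u32 i;
 *	int ret;
 *
 *	ret = gh_rm_get_hyp_resources(rm, vmid, &res);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < le32_to_cpu(res->n_entries); i++) {
 *		struct gh_resource *ghrsc;
 *
 *		ghrsc = gh_rm_alloc_resource(rm, &res->entries[i]);
 *		...
 *	}
 *	kfree(res);
 */
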
struct gh_rm_platform_ops {
	int (*pre_mem_share)(void *rm, struct gh_rm_mem_parcel *mem_parcel);
	int (*post_mem_reclaim)(void *rm, struct gh_rm_mem_parcel *mem_parcel);
	ANDROID_BACKPORT_RESERVED(1);
	ANDROID_BACKPORT_RESERVED(2);
	ANDROID_BACKPORT_RESERVED(3);
	ANDROID_BACKPORT_RESERVED(4);
};

#if IS_ENABLED(CONFIG_GUNYAH_PLATFORM_HOOKS)
int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops);
void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops);
int devm_gh_rm_register_platform_ops(struct device *dev, const struct gh_rm_platform_ops *ops);
#else
static inline int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops)
	{ return 0; }
static inline void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops) { }
static inline int devm_gh_rm_register_platform_ops(struct device *dev,
						   const struct gh_rm_platform_ops *ops)
	{ return 0; }
#endif
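
/*
 * Example registration (illustrative sketch): a platform module that must
 * hand memory to firmware around share/reclaim could hook in like this. The
 * callback body and the device used for the devm registration are
 * hypothetical.
 *
 *	static int my_pre_mem_share(void *rm, struct gh_rm_mem_parcel *mem_parcel)
 *	{
 *		... e.g. assign the parcel's regions to secure firmware ...
 *		return 0;
 *	}
 *
 *	static const struct gh_rm_platform_ops my_ops = {
 *		.pre_mem_share = my_pre_mem_share,
 *	};
 *
 *	ret = devm_gh_rm_register_platform_ops(dev, &my_ops);
 */
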
#endif /* _GUNYAH_RSC_MGR_H */