  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. *
  6. */
  7. #ifndef __GH_RM_DRV_H
  8. #define __GH_RM_DRV_H
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fwnode.h>
#include <linux/gunyah_rsc_mgr.h>
#include <linux/notifier.h>
#include <linux/range.h>
#include <linux/types.h>
#include "gh_common.h"
  15. /* Notification type Message IDs */
  16. /* Memory APIs */
  17. #define GH_RM_NOTIF_MEM_SHARED 0x51100011
  18. #define GH_RM_NOTIF_MEM_RELEASED 0x51100012
  19. #define GH_RM_NOTIF_MEM_ACCEPTED 0x51100013
  20. #define GH_RM_MEM_TYPE_NORMAL 0
  21. #define GH_RM_MEM_TYPE_IO 1
  22. #define GH_RM_TRANS_TYPE_DONATE 0
  23. #define GH_RM_TRANS_TYPE_LEND 1
  24. #define GH_RM_TRANS_TYPE_SHARE 2
  25. #define GH_RM_ACL_X BIT(0)
  26. #define GH_RM_ACL_W BIT(1)
  27. #define GH_RM_ACL_R BIT(2)
  28. #define GH_RM_MEM_RELEASE_CLEAR BIT(0)
  29. #define GH_RM_MEM_RECLAIM_CLEAR BIT(0)
  30. #define GH_RM_MEM_ACCEPT_VALIDATE_SANITIZED BIT(0)
  31. #define GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS BIT(1)
  32. #define GH_RM_MEM_ACCEPT_VALIDATE_LABEL BIT(2)
  33. #define GH_RM_MEM_ACCEPT_MAP_IPA_CONTIGUOUS BIT(4)
  34. #define GH_RM_MEM_ACCEPT_DONE BIT(7)
  35. #define GH_RM_MEM_SHARE_SANITIZE BIT(0)
  36. #define GH_RM_MEM_SHARE_APPEND BIT(1)
  37. #define GH_RM_MEM_LEND_SANITIZE BIT(0)
  38. #define GH_RM_MEM_LEND_APPEND BIT(1)
  39. #define GH_RM_MEM_DONATE_SANITIZE BIT(0)
  40. #define GH_RM_MEM_DONATE_APPEND BIT(1)
  41. #define GH_RM_MEM_APPEND_END BIT(0)
  42. #define GH_RM_MEM_NOTIFY_RECIPIENT_SHARED BIT(0)
  43. #define GH_RM_MEM_NOTIFY_RECIPIENT GH_RM_MEM_NOTIFY_RECIPIENT_SHARED
  44. #define GH_RM_MEM_NOTIFY_OWNER_RELEASED BIT(1)
  45. #define GH_RM_MEM_NOTIFY_OWNER GH_RM_MEM_NOTIFY_OWNER_RELEASED
  46. #define GH_RM_MEM_NOTIFY_OWNER_ACCEPTED BIT(2)
  47. /* Support may vary across hardware platforms */
  48. #define GH_RM_IPA_RESERVE_ECC BIT(0)
  49. #define GH_RM_IPA_RESERVE_MEMTAG BIT(1)
  50. #define GH_RM_IPA_RESERVE_NORMAL BIT(2)
  51. #define GH_RM_IPA_RESERVE_IO BIT(3)
  52. /* BIT(4) and BIT(5) reserved */
  53. /* The calling VM's default memory type */
  54. #define GH_RM_IPA_RESERVE_DEFAULT BIT(6)
  55. #define GH_RM_IPA_RESERVE_VALID_FLAGS (GENMASK(3, 0) | BIT(6))
  56. #define GH_RM_IPA_RESERVE_PLATFORM_ENCRYPTED BIT(0)
  57. #define GH_RM_IPA_RESERVE_PLATFORM_AUTHENTICATED BIT(1)
  58. #define GH_RM_IPA_RESERVE_PLATFORM_ANTI_ROLLBACK BIT(2)
  59. #define GH_RM_IPA_RESERVE_PLATFORM_VALID_FLAGS GENMASK(2, 0)
  60. #define MAX_EXIT_REASON_SIZE 4
  61. struct gh_rm_mem_shared_acl_entry;
  62. struct gh_rm_mem_shared_sgl_entry;
  63. struct gh_rm_mem_shared_attr_entry;
/*
 * Payload for the GH_RM_NOTIF_MEM_SHARED notification.
 *
 * @mem_handle:   memparcel handle identifying the shared memory.
 * @mem_type:     GH_RM_MEM_TYPE_NORMAL or GH_RM_MEM_TYPE_IO.
 * @trans_type:   GH_RM_TRANS_TYPE_{DONATE,LEND,SHARE}.
 * @flags:        transaction flags supplied by the sender.
 * @owner_vmid:   VMID of the memory owner.
 * @label:        sender-chosen label for the memparcel.
 * @mem_info_tag: tag propagated with the notification (presumably the
 *                mem_info_tag given to gh_rm_mem_notify() — verify
 *                against the RM message spec).
 *
 * The wire format is followed by variable-length arrays of the
 * *_acl_entry/*_sgl_entry/*_attr_entry structs declared above; their
 * arrangement is still unresolved (see TODO below).
 */
struct gh_rm_notif_mem_shared_payload {
	u32 mem_handle;
	u8 mem_type;
	u8 trans_type;
	u8 flags;
	u8 reserved1;
	u16 owner_vmid;
	u16 reserved2;
	u32 label;
	gh_label_t mem_info_tag;
	/* TODO: How to arrange multiple variable length struct arrays? */
} __packed;
  76. struct gh_rm_mem_shared_acl_entry {
  77. u16 acl_vmid;
  78. u8 acl_rights;
  79. u8 reserved;
  80. } __packed;
  81. struct gh_rm_mem_shared_sgl_entry {
  82. u32 sgl_size_low;
  83. u32 sgl_size_high;
  84. } __packed;
  85. struct gh_rm_mem_shared_attr_entry {
  86. u16 attributes;
  87. u16 attributes_vmid;
  88. } __packed;
  89. struct gh_rm_notif_mem_released_payload {
  90. u32 mem_handle;
  91. u16 participant_vmid;
  92. u16 reserved;
  93. gh_label_t mem_info_tag;
  94. } __packed;
  95. struct gh_rm_notif_mem_accepted_payload {
  96. u32 mem_handle;
  97. u16 participant_vmid;
  98. u16 reserved;
  99. gh_label_t mem_info_tag;
  100. } __packed;
  101. struct gh_acl_entry {
  102. u16 vmid;
  103. u8 perms;
  104. u8 reserved;
  105. } __packed;
  106. struct gh_sgl_entry {
  107. u64 ipa_base;
  108. u64 size;
  109. } __packed;
  110. struct gh_mem_attr_entry {
  111. u16 attr;
  112. u16 vmid;
  113. } __packed;
  114. struct gh_acl_desc {
  115. u32 n_acl_entries;
  116. struct gh_acl_entry acl_entries[];
  117. } __packed;
  118. struct gh_sgl_desc {
  119. u16 n_sgl_entries;
  120. u16 reserved;
  121. struct gh_sgl_entry sgl_entries[];
  122. } __packed;
  123. struct gh_mem_attr_desc {
  124. u16 n_mem_attr_entries;
  125. u16 reserved;
  126. struct gh_mem_attr_entry attr_entries[];
  127. } __packed;
  128. struct gh_notify_vmid_entry {
  129. u16 vmid;
  130. u16 reserved;
  131. } __packed;
  132. struct gh_notify_vmid_desc {
  133. u16 n_vmid_entries;
  134. u16 reserved;
  135. struct gh_notify_vmid_entry vmid_entries[];
  136. } __packed;
  137. /* VM APIs */
  138. #define GH_RM_NOTIF_VM_EXITED 0x56100001
  139. #define GH_RM_NOTIF_VM_SHUTDOWN 0x56100002
  140. #define GH_RM_NOTIF_VM_STATUS 0x56100008
  141. #define GH_RM_NOTIF_VM_IRQ_LENT 0x56100011
  142. #define GH_RM_NOTIF_VM_IRQ_RELEASED 0x56100012
  143. #define GH_RM_NOTIF_VM_IRQ_ACCEPTED 0x56100013
  144. /* AUTH mechanisms */
  145. #define GH_VM_UNAUTH 0
  146. #define GH_VM_AUTH_PIL_ELF 1
  147. #define GH_VM_AUTH_ANDROID_PVM 2
  148. /* AUTH_PARAM_TYPE mechanisms */
  149. #define GH_VM_AUTH_PARAM_PAS_ID 0 /* Used to pass peripheral auth id */
  150. #define GH_RM_VM_STATUS_NO_STATE 0
  151. #define GH_RM_VM_STATUS_INIT 1
  152. #define GH_RM_VM_STATUS_READY 2
  153. #define GH_RM_VM_STATUS_RUNNING 3
  154. #define GH_RM_VM_STATUS_PAUSED 4
  155. #define GH_RM_VM_STATUS_LOAD 5
  156. #define GH_RM_VM_STATUS_AUTH 6
  157. /* 7 is reserved */
  158. #define GH_RM_VM_STATUS_INIT_FAILED 8
  159. #define GH_RM_VM_STATUS_EXITED 9
  160. #define GH_RM_VM_STATUS_RESETTING 10
  161. #define GH_RM_VM_STATUS_RESET 11
  162. #define GH_RM_OS_STATUS_NONE 0
  163. #define GH_RM_OS_STATUS_EARLY_BOOT 1
  164. #define GH_RM_OS_STATUS_BOOT 2
  165. #define GH_RM_OS_STATUS_INIT 3
  166. #define GH_RM_OS_STATUS_RUN 4
  167. #define GH_RM_APP_STATUS_TUI_SERVICE_BOOT 1
  168. #define GH_RM_VM_STOP_FLAG_FORCE_STOP 0x01
  169. #define GH_RM_VM_EXIT_TYPE_VM_EXIT 0
  170. #define GH_RM_VM_EXIT_TYPE_SYSTEM_OFF 1
  171. #define GH_RM_VM_EXIT_TYPE_SYSTEM_RESET 2
  172. #define GH_RM_VM_EXIT_TYPE_SYSTEM_RESET2 3
  173. #define GH_RM_VM_EXIT_TYPE_WDT_BITE 4
  174. #define GH_RM_VM_EXIT_TYPE_HYP_ERROR 5
  175. #define GH_RM_VM_EXIT_TYPE_ASYNC_EXT_ABORT 6
  176. #define GH_RM_VM_EXIT_TYPE_VM_STOP_FORCED 7
  177. /* GH_RM_VM_EXIT_TYPE_VM_EXIT */
  178. struct gh_vm_exit_reason_vm_exit {
  179. u16 exit_flags;
  180. /* GH_VM_EXIT_EXIT_FLAG_* are bit representations */
  181. #define GH_VM_EXIT_EXIT_FLAG_TYPE 0x1
  182. #define GH_VM_EXIT_POWEROFF 0 /* Value at bit:0 */
  183. #define GH_VM_EXIT_RESTART 1 /* Value at bit:0 */
  184. #define GH_VM_EXIT_EXIT_FLAG_SYSTEM 0x2
  185. #define GH_VM_EXIT_EXIT_FLAG_WARM 0x4
  186. #define GH_VM_EXIT_EXIT_FLAG_DUMP 0x8
  187. u8 exit_code;
  188. /* Exit codes */
  189. #define GH_VM_EXIT_CODE_NORMAL 0
  190. #define GH_VM_EXIT_SOFTWARE_ERR 1
  191. #define GH_VM_EXIT_BUS_ERR 2
  192. #define GH_VM_EXIT_DEVICE_ERR 3
  193. u8 reserved;
  194. } __packed;
  195. /* Reasons for VM_STOP */
  196. #define GH_VM_STOP_SHUTDOWN 0
  197. #define GH_VM_STOP_RESTART 1
  198. #define GH_VM_STOP_CRASH 2
  199. #define GH_VM_STOP_FORCE_STOP 3
  200. #define GH_VM_STOP_MAX 4
  201. struct gh_rm_notif_vm_exited_payload {
  202. gh_vmid_t vmid;
  203. u16 exit_type;
  204. u32 exit_reason_size;
  205. u32 exit_reason[0];
  206. } __packed;
  207. struct gh_rm_notif_vm_shutdown_payload {
  208. u32 stop_reason;
  209. } __packed;
/* Payload for GH_RM_NOTIF_VM_STATUS: a status transition of @vmid. */
struct gh_rm_notif_vm_status_payload {
	gh_vmid_t vmid;
	u16 reserved;
	u8 vm_status;	/* GH_RM_VM_STATUS_* */
	u8 os_status;	/* GH_RM_OS_STATUS_* */
	u16 app_status;	/* e.g. GH_RM_APP_STATUS_TUI_SERVICE_BOOT */
} __packed;
  217. struct gh_rm_notif_vm_irq_lent_payload {
  218. gh_vmid_t owner_vmid;
  219. u16 reserved;
  220. gh_virq_handle_t virq_handle;
  221. gh_label_t virq_label;
  222. } __packed;
  223. struct gh_rm_notif_vm_irq_released_payload {
  224. gh_virq_handle_t virq_handle;
  225. } __packed;
  226. struct gh_rm_notif_vm_irq_accepted_payload {
  227. gh_virq_handle_t virq_handle;
  228. } __packed;
  229. struct gh_vm_auth_param_entry {
  230. u32 auth_param_type;
  231. u32 auth_param;
  232. } __packed;
  233. /* Arch specific APIs */
  234. #if IS_ENABLED(CONFIG_GH_ARM64_DRV)
  235. /* IRQ APIs */
  236. int gh_get_irq(u32 virq, u32 type, struct fwnode_handle *handle);
  237. int gh_put_irq(int irq);
  238. int gh_get_virq(int base_virq, int virq);
  239. int gh_put_virq(int irq);
  240. int gh_arch_validate_vm_exited_notif(size_t payload_size,
  241. struct gh_rm_notif_vm_exited_payload *payload);
  242. #else
  243. static inline int gh_get_irq(u32 virq, u32 type,
  244. struct fwnode_handle *handle)
  245. {
  246. return -EINVAL;
  247. }
  248. static inline int gh_put_irq(int irq)
  249. {
  250. return -EINVAL;
  251. }
  252. static inline int gh_get_virq(int base_virq, int virq)
  253. {
  254. return -EINVAL;
  255. }
  256. static inline int gh_put_virq(int irq)
  257. {
  258. return -EINVAL;
  259. }
  260. static inline int gh_arch_validate_vm_exited_notif(size_t payload_size,
  261. struct gh_rm_notif_vm_exited_payload *payload)
  262. {
  263. return -EINVAL;
  264. }
  265. #endif
  266. /* VM Services */
  267. #define GH_RM_NOTIF_VM_CONSOLE_CHARS 0X56100080
  268. struct gh_rm_notif_vm_console_chars {
  269. gh_vmid_t vmid;
  270. u16 num_bytes;
  271. u8 bytes[0];
  272. } __packed;
/*
 * VM status triple exchanged with the resource manager via
 * gh_rm_vm_get_status()/gh_rm_vm_set_status(); values are the
 * GH_RM_VM_STATUS_*, GH_RM_OS_STATUS_* and app-status constants above.
 */
struct gh_vm_status {
	u8 vm_status;
	u8 os_status;
	u16 app_status;
} __packed;
  278. struct notifier_block;
  279. typedef int (*gh_virtio_mmio_cb_t)(gh_vmid_t peer, const char *vm_name,
  280. gh_label_t label, gh_capid_t cap_id, int linux_irq, u64 base, u64 size);
  281. typedef int (*gh_wdog_manage_cb_t)(gh_vmid_t vmid, gh_capid_t cap_id, bool populate);
  282. typedef int (*gh_vcpu_affinity_set_cb_t)(gh_vmid_t vmid, gh_label_t label,
  283. gh_capid_t cap_id, int linux_irq);
  284. typedef int (*gh_vcpu_affinity_reset_cb_t)(gh_vmid_t vmid, gh_label_t label,
  285. gh_capid_t cap_id, int *linux_irq);
  286. typedef int (*gh_vpm_grp_set_cb_t)(gh_vmid_t vmid, gh_capid_t cap_id, int linux_irq);
  287. typedef int (*gh_vpm_grp_reset_cb_t)(gh_vmid_t vmid, int *linux_irq);
  288. typedef void (*gh_all_res_populated_cb_t)(gh_vmid_t vmid, bool res_populated);
  289. #if IS_ENABLED(CONFIG_GH_RM_DRV)
  290. /* RM client registration APIs */
  291. int gh_rm_register_notifier(struct notifier_block *nb);
  292. int gh_rm_unregister_notifier(struct notifier_block *nb);
  293. /* Client APIs for IRQ management */
  294. int gh_rm_virq_to_irq(u32 virq, u32 type);
  295. int gh_rm_irq_to_virq(int irq, u32 *virq);
  296. int gh_rm_vm_irq_lend(gh_vmid_t vmid,
  297. int virq,
  298. int label,
  299. gh_virq_handle_t *virq_handle);
  300. int gh_rm_vm_irq_lend_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);
  301. int gh_rm_vm_irq_accept(gh_virq_handle_t virq_handle, int virq);
  302. int gh_rm_vm_irq_accept_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);
  303. int gh_rm_vm_irq_release(gh_virq_handle_t virq_handle);
  304. int gh_rm_vm_irq_release_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);
  305. int gh_rm_vm_irq_reclaim(gh_virq_handle_t virq_handle);
  306. int gh_rm_set_virtio_mmio_cb(gh_virtio_mmio_cb_t fnptr);
  307. void gh_rm_unset_virtio_mmio_cb(void);
  308. int gh_rm_set_wdog_manage_cb(gh_wdog_manage_cb_t fnptr);
  309. int gh_rm_set_vcpu_affinity_cb(gh_vcpu_affinity_set_cb_t fnptr);
  310. int gh_rm_reset_vcpu_affinity_cb(gh_vcpu_affinity_reset_cb_t fnptr);
  311. int gh_rm_set_vpm_grp_cb(gh_vpm_grp_set_cb_t fnptr);
  312. int gh_rm_reset_vpm_grp_cb(gh_vpm_grp_reset_cb_t fnptr);
  313. int gh_rm_all_res_populated_cb(gh_all_res_populated_cb_t fnptr);
  314. /* Client APIs for VM management */
  315. int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid);
  316. int gh_rm_vm_dealloc_vmid(gh_vmid_t vmid);
  317. int gh_rm_vm_config_image(gh_vmid_t vmid, u16 auth_mech, u32 mem_handle,
  318. u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size);
  319. int gh_rm_vm_auth_image(gh_vmid_t vmid, ssize_t n_entries,
  320. struct gh_vm_auth_param_entry *entry);
  321. int ghd_rm_vm_init(gh_vmid_t vmid);
  322. int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid);
  323. int gh_rm_get_vm_id_info(gh_vmid_t vmid);
  324. int gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name);
  325. int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vminfo);
  326. int ghd_rm_vm_start(int vmid);
  327. enum gh_vm_names gh_get_image_name(const char *str);
  328. enum gh_vm_names gh_get_vm_name(const char *str);
  329. int gh_rm_get_this_vmid(gh_vmid_t *vmid);
  330. int ghd_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags);
  331. int ghd_rm_vm_reset(gh_vmid_t vmid);
  332. /* Client APIs for VM query */
  333. int gh_rm_populate_hyp_res(gh_vmid_t vmid, const char *vm_name);
  334. int gh_rm_unpopulate_hyp_res(gh_vmid_t vmid, const char *vm_name);
  335. /* Client APIs for VM Services */
  336. struct gh_vm_status *gh_rm_vm_get_status(gh_vmid_t vmid);
  337. int gh_rm_vm_set_status(struct gh_vm_status gh_vm_status);
  338. int gh_rm_vm_set_vm_status(u8 vm_status);
  339. int gh_rm_vm_set_os_status(u8 os_status);
  340. int gh_rm_vm_set_app_status(u16 app_status);
  341. int gh_rm_console_open(gh_vmid_t vmid);
  342. int gh_rm_console_close(gh_vmid_t vmid);
  343. int gh_rm_console_write(gh_vmid_t vmid, const char *buf, size_t size);
  344. int gh_rm_console_flush(gh_vmid_t vmid);
  345. int gh_rm_mem_qcom_lookup_sgl(u8 mem_type, gh_label_t label,
  346. struct gh_acl_desc *acl_desc,
  347. struct gh_sgl_desc *sgl_desc,
  348. struct gh_mem_attr_desc *mem_attr_desc,
  349. gh_memparcel_handle_t *handle);
  350. int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags);
  351. int ghd_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags);
  352. struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
  353. u8 trans_type, u8 flags, gh_label_t label,
  354. struct gh_acl_desc *acl_desc,
  355. struct gh_sgl_desc *sgl_desc,
  356. struct gh_mem_attr_desc *mem_attr_desc,
  357. u16 map_vmid);
  358. int ghd_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
  359. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  360. struct gh_mem_attr_desc *mem_attr_desc,
  361. gh_memparcel_handle_t *handle);
  362. int ghd_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
  363. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  364. struct gh_mem_attr_desc *mem_attr_desc,
  365. gh_memparcel_handle_t *handle);
  366. int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label,
  367. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  368. struct gh_mem_attr_desc *mem_attr_desc,
  369. gh_memparcel_handle_t *handle);
  370. int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags,
  371. gh_label_t mem_info_tag,
  372. struct gh_notify_vmid_desc *vmid_desc);
  373. int gh_rm_ipa_reserve(u64 size, u64 align, struct range limits, u32 generic_constraints,
  374. u32 platform_constraints, u64 *ipa);
  375. /* API to set time base */
  376. int gh_rm_vm_set_time_base(gh_vmid_t vmid);
  377. /* API for minidump support */
  378. int gh_rm_minidump_get_info(void);
  379. int gh_rm_minidump_register_range(phys_addr_t base_ipa, size_t region_size,
  380. const char *name, size_t name_size);
  381. int gh_rm_minidump_deregister_slot(uint16_t slot_num);
  382. int gh_rm_minidump_get_slot_from_name(uint16_t starting_slot, const char *name,
  383. size_t name_size);
  384. #else
  385. /* RM client register notifications APIs */
  386. static inline int gh_rm_register_notifier(struct notifier_block *nb)
  387. {
  388. return -ENODEV;
  389. }
  390. static inline int gh_rm_unregister_notifier(struct notifier_block *nb)
  391. {
  392. return -ENODEV;
  393. }
  394. /* Client APIs for IRQ management */
  395. static inline int gh_rm_virq_to_irq(u32 virq)
  396. {
  397. return -EINVAL;
  398. }
  399. static inline int gh_rm_vm_irq_lend(gh_vmid_t vmid,
  400. int virq,
  401. int label,
  402. gh_virq_handle_t *virq_handle)
  403. {
  404. return -EINVAL;
  405. }
  406. static inline int gh_rm_irq_to_virq(int irq, u32 *virq)
  407. {
  408. return -EINVAL;
  409. }
  410. static inline int gh_rm_vm_irq_lend_notify(gh_vmid_t vmid,
  411. gh_virq_handle_t virq_handle)
  412. {
  413. return -EINVAL;
  414. }
  415. static inline int gh_rm_vm_irq_accept(gh_virq_handle_t virq_handle, int virq)
  416. {
  417. return -EINVAL;
  418. }
  419. static inline int gh_rm_vm_irq_accept_notify(gh_vmid_t vmid,
  420. gh_virq_handle_t virq_handle)
  421. {
  422. return -EINVAL;
  423. }
  424. static inline int gh_rm_vm_irq_release(gh_virq_handle_t virq_handle)
  425. {
  426. return -EINVAL;
  427. }
  428. static inline int gh_rm_vm_irq_release_notify(gh_vmid_t vmid,
  429. gh_virq_handle_t virq_handle)
  430. {
  431. return -EINVAL;
  432. }
  433. static inline int gh_rm_vm_irq_reclaim(gh_virq_handle_t virq_handle)
  434. {
  435. return -EINVAL;
  436. }
  437. /* Client APIs for VM management */
  438. static inline int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid)
  439. {
  440. return -EINVAL;
  441. }
  442. static inline int gh_rm_vm_dealloc_vmid(gh_vmid_t vmid)
  443. {
  444. return -EINVAL;
  445. }
  446. static inline int gh_rm_vm_config_image(gh_vmid_t vmid, u16 auth_mech,
  447. u32 mem_handle, u64 image_offset, u64 image_size,
  448. u64 dtb_offset, u64 dtb_size)
  449. {
  450. return -EINVAL;
  451. }
  452. static inline int gh_rm_vm_auth_image(gh_vmid_t vmid, ssize_t n_entries,
  453. struct gh_vm_auth_param_entry *entry)
  454. {
  455. return -EINVAL;
  456. }
  457. static inline int ghd_rm_vm_init(gh_vmid_t vmid)
  458. {
  459. return -EINVAL;
  460. }
  461. static inline int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
  462. {
  463. return -EINVAL;
  464. }
  465. static inline int gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name)
  466. {
  467. return -EINVAL;
  468. }
  469. static inline int gh_rm_get_this_vmid(gh_vmid_t *vmid)
  470. {
  471. return -EINVAL;
  472. }
  473. static inline int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vminfo)
  474. {
  475. return -EINVAL;
  476. }
  477. static inline int ghd_rm_vm_start(int vmid)
  478. {
  479. return -EINVAL;
  480. }
  481. static inline int gh_rm_get_vm_id_info(gh_vmid_t vmid)
  482. {
  483. return -EINVAL;
  484. }
  485. static inline int ghd_rm_vm_stop(gh_vmid_t vmid, u32 stop_reason, u8 flags)
  486. {
  487. return -EINVAL;
  488. }
  489. static inline int ghd_rm_vm_reset(gh_vmid_t vmid)
  490. {
  491. return -EINVAL;
  492. }
  493. /* Client APIs for VM query */
  494. static inline int gh_rm_populate_hyp_res(gh_vmid_t vmid, const char *vm_name)
  495. {
  496. return -EINVAL;
  497. }
  498. /* Client APIs for VM Services */
  499. static inline struct gh_vm_status *gh_rm_vm_get_status(gh_vmid_t vmid)
  500. {
  501. return ERR_PTR(-EINVAL);
  502. }
  503. static inline int gh_rm_vm_set_status(struct gh_vm_status gh_vm_status)
  504. {
  505. return -EINVAL;
  506. }
  507. static inline int gh_rm_vm_set_vm_status(u8 vm_status)
  508. {
  509. return -EINVAL;
  510. }
  511. static inline int gh_rm_vm_set_os_status(u8 os_status)
  512. {
  513. return -EINVAL;
  514. }
  515. static inline int gh_rm_vm_set_app_status(u16 app_status)
  516. {
  517. return -EINVAL;
  518. }
  519. static inline int gh_rm_console_open(gh_vmid_t vmid)
  520. {
  521. return -EINVAL;
  522. }
  523. static inline int gh_rm_console_close(gh_vmid_t vmid)
  524. {
  525. return -EINVAL;
  526. }
  527. static inline int gh_rm_console_write(gh_vmid_t vmid, const char *buf,
  528. size_t size)
  529. {
  530. return -EINVAL;
  531. }
  532. static inline int gh_rm_console_flush(gh_vmid_t vmid)
  533. {
  534. return -EINVAL;
  535. }
  536. static inline int gh_rm_mem_qcom_lookup_sgl(u8 mem_type, gh_label_t label,
  537. struct gh_acl_desc *acl_desc,
  538. struct gh_sgl_desc *sgl_desc,
  539. struct gh_mem_attr_desc *mem_attr_desc,
  540. gh_memparcel_handle_t *handle)
  541. {
  542. return -EINVAL;
  543. }
  544. static inline int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags)
  545. {
  546. return -EINVAL;
  547. }
  548. static inline int ghd_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
  549. {
  550. return -EINVAL;
  551. }
  552. static inline struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle,
  553. u8 mem_type,
  554. u8 trans_type, u8 flags, gh_label_t label,
  555. struct gh_acl_desc *acl_desc,
  556. struct gh_sgl_desc *sgl_desc,
  557. struct gh_mem_attr_desc *mem_attr_desc,
  558. u16 map_vmid)
  559. {
  560. return ERR_PTR(-EINVAL);
  561. }
  562. static inline int ghd_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
  563. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  564. struct gh_mem_attr_desc *mem_attr_desc,
  565. gh_memparcel_handle_t *handle)
  566. {
  567. return -EINVAL;
  568. }
  569. static inline int ghd_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
  570. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  571. struct gh_mem_attr_desc *mem_attr_desc,
  572. gh_memparcel_handle_t *handle)
  573. {
  574. return -EINVAL;
  575. }
  576. static inline int gh_rm_mem_donate(u8 mem_type, u8 flags, gh_label_t label,
  577. struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
  578. struct gh_mem_attr_desc *mem_attr_desc,
  579. gh_memparcel_handle_t *handle)
  580. {
  581. return -EINVAL;
  582. }
  583. static inline int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags,
  584. gh_label_t mem_info_tag,
  585. struct gh_notify_vmid_desc *vmid_desc)
  586. {
  587. return -EINVAL;
  588. }
  589. static inline int gh_rm_set_virtio_mmio_cb(gh_virtio_mmio_cb_t fnptr)
  590. {
  591. return -EINVAL;
  592. }
  593. static inline void gh_rm_unset_virtio_mmio_cb(void)
  594. {
  595. }
  596. static inline int gh_rm_set_wdog_manage_cb(gh_wdog_manage_cb_t fnptr)
  597. {
  598. return -EINVAL;
  599. }
  600. static inline int gh_rm_set_vcpu_affinity_cb(gh_vcpu_affinity_set_cb_t fnptr)
  601. {
  602. return -EINVAL;
  603. }
  604. static inline int gh_rm_reset_vcpu_affinity_cb(gh_vcpu_affinity_reset_cb_t fnptr)
  605. {
  606. return -EINVAL;
  607. }
  608. static inline int gh_rm_set_vpm_grp_cb(gh_vpm_grp_set_cb_t fnptr)
  609. {
  610. return -EINVAL;
  611. }
  612. static inline int gh_rm_reset_vpm_grp_cb(gh_vpm_grp_reset_cb_t fnptr)
  613. {
  614. return -EINVAL;
  615. }
  616. static inline int gh_rm_all_res_populated_cb(gh_all_res_populated_cb_t fnptr)
  617. {
  618. return -EINVAL;
  619. }
  620. /* API to set time base */
  621. static inline int gh_rm_vm_set_time_base(gh_vmid_t vmid)
  622. {
  623. return -EINVAL;
  624. }
  625. /* API for minidump support */
  626. static inline int gh_rm_minidump_get_info(void)
  627. {
  628. return -EINVAL;
  629. }
  630. static inline int gh_rm_minidump_register_range(phys_addr_t base_ipa,
  631. size_t region_size, const char *name,
  632. size_t name_size)
  633. {
  634. return -EINVAL;
  635. }
  636. static inline int gh_rm_minidump_deregister_slot(uint16_t slot_num)
  637. {
  638. return -EINVAL;
  639. }
  640. static inline int gh_rm_minidump_get_slot_from_name(uint16_t starting_slot,
  641. const char *name,
  642. size_t name_size)
  643. {
  644. return -EINVAL;
  645. }
  646. static inline int gh_rm_ipa_reserve(u64 size, u64 align, struct range limits,
  647. u32 generic_constraints, u32 platform_constraints,
  648. u64 *ipa)
  649. {
  650. return -EINVAL;
  651. }
  652. #endif
  653. #endif