  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #undef TRACE_SYSTEM
  6. #define TRACE_SYSTEM gunyah
  7. #if !defined(_TRACE_GUNYAH_H) || defined(TRACE_HEADER_MULTI_READ)
  8. #define _TRACE_GUNYAH_H
#include <linux/err.h>
#include <linux/trace_seq.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <soc/qcom/secure_buffer.h>
  13. #ifndef __GUNYAH_HELPER_FUNCTIONS
  14. #define __GUNYAH_HELPER_FUNCTIONS
  15. #define MAX_ENTRIES_TO_PRINT 4
/*
 * Memory transaction types, matching the trans_type values decoded by the
 * __print_symbolic() table in the gh_rm_mem_accept_donate_lend_share events.
 */
enum {
	DONATE = 0,
	LEND = 1,
	SHARE = 2
};
  21. static inline const char *__print_acl_arr(struct trace_seq *p, u8 *acl_perms, u16 *acl_vmids,
  22. int count)
  23. {
  24. const char *ret;
  25. int i = 0;
  26. u8 *perms = acl_perms;
  27. u16 *vmids = acl_vmids;
  28. ret = trace_seq_buffer_ptr(p);
  29. trace_seq_putc(p, '{');
  30. for (i = 0; i < count; i++) {
  31. trace_seq_printf(p, "(0x%x,", *vmids);
  32. trace_seq_printf(p, "%s%s%s)",
  33. ((*perms & 0x4) ? "R" : ""),
  34. ((*perms & 0x2) ? "W" : ""),
  35. ((*perms & 0x1) ? "X" : "")
  36. );
  37. perms++;
  38. vmids++;
  39. if (i != count-1)
  40. trace_seq_printf(p, ", ");
  41. }
  42. trace_seq_putc(p, '}');
  43. trace_seq_putc(p, 0);
  44. return ret;
  45. }
  46. #endif
/*
 * Event class shared by gh_rm_mem_{accept,donate,lend,share}: records the
 * memparcel type/flags/label, the ACL, the SG list (capped at
 * MAX_ENTRIES_TO_PRINT entries), the memory attributes, and the resulting
 * handle.  All descriptor pointers may be NULL; sizes then collapse to 0.
 */
DECLARE_EVENT_CLASS(gh_rm_mem_accept_donate_lend_share,

	TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
		 struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
		 struct gh_mem_attr_desc *mem_attr_desc,
		 gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),

	TP_ARGS(mem_type, flags, label,
		acl_desc, sgl_desc,
		mem_attr_desc,
		handle, map_vmid, trans_type),

	TP_STRUCT__entry(
		__field(u8, mem_type)
		__field(u8, flags)
		__field(gh_label_t, label)
		/* gh_acl_desc: parallel vmid/perm arrays, one pair per ACL entry */
		__field(u32, n_acl_entries)
		__dynamic_array(u16, acl_vmid_arr,
			((acl_desc != NULL) ? acl_desc->n_acl_entries : 0))
		__dynamic_array(u8, acl_perm_arr,
			((acl_desc != NULL) ? acl_desc->n_acl_entries : 0))
		/* gh_sgl_desc: only the first MAX_ENTRIES_TO_PRINT entries are kept */
		__field(u16, n_sgl_entries)
		__dynamic_array(u64, sgl_ipa_base_arr,
			((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
						? MAX_ENTRIES_TO_PRINT
						: sgl_desc->n_sgl_entries)
					    : 0))
		__dynamic_array(u64, sgl_size_arr,
			((sgl_desc != NULL) ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
						? MAX_ENTRIES_TO_PRINT
						: sgl_desc->n_sgl_entries)
					    : 0))
		/* mem_attr_desc: parallel attr/vmid arrays */
		__field(u16, n_mem_attr_entries)
		__dynamic_array(u16, mem_attr_attr_arr,
			((mem_attr_desc != NULL)
				? mem_attr_desc->n_mem_attr_entries : 0))
		__dynamic_array(u16, mem_attr_vmid_arr,
			((mem_attr_desc != NULL)
				? mem_attr_desc->n_mem_attr_entries : 0))
		__field(gh_memparcel_handle_t, handle)
		__field(u16, map_vmid)
		__field(u8, trans_type)
		/* number of SG entries actually copied (<= MAX_ENTRIES_TO_PRINT) */
		__field(int, sgl_entries_to_print)
	),

	TP_fast_assign(
		unsigned int i;
		/* gh_acl_desc */
		u16 *acl_vmids_arr_ptr = __get_dynamic_array(acl_vmid_arr);
		u8 *acl_perms_arr_ptr = __get_dynamic_array(acl_perm_arr);
		/* gh_sgl_desc */
		u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr);
		u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr);
		/* mem_attr_desc */
		u16 *mem_attr_attr_arr_ptr = __get_dynamic_array(mem_attr_attr_arr);
		u16 *mem_attr_vmid_arr_ptr = __get_dynamic_array(mem_attr_vmid_arr);

		__entry->mem_type = mem_type;
		__entry->flags = flags;
		__entry->label = label;
		/* gh_acl_desc: copy every ACL entry into the parallel arrays */
		if (acl_desc != NULL) {
			__entry->n_acl_entries = acl_desc->n_acl_entries;
			for (i = 0; i < __entry->n_acl_entries; i++) {
				acl_vmids_arr_ptr[i] = acl_desc->acl_entries[i].vmid;
				acl_perms_arr_ptr[i] = acl_desc->acl_entries[i].perms;
			}
		} else {
			__entry->n_acl_entries = 0;
		}
		/* gh_sgl_desc: record the true count, copy only a capped prefix */
		if (sgl_desc != NULL) {
			__entry->n_sgl_entries = sgl_desc->n_sgl_entries;
			__entry->sgl_entries_to_print =
				__entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT
					? MAX_ENTRIES_TO_PRINT
					: __entry->n_sgl_entries;
			for (i = 0; i < __entry->sgl_entries_to_print; i++) {
				sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base;
				sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size;
			}
		} else {
			__entry->n_sgl_entries = 0;
			__entry->sgl_entries_to_print = 0;
		}
		/* mem_attr_desc: copy every attribute entry */
		if (mem_attr_desc != NULL) {
			__entry->n_mem_attr_entries = mem_attr_desc->n_mem_attr_entries;
			for (i = 0; i < __entry->n_mem_attr_entries; i++) {
				mem_attr_attr_arr_ptr[i] = mem_attr_desc->attr_entries[i].attr;
				mem_attr_vmid_arr_ptr[i] = mem_attr_desc->attr_entries[i].vmid;
			}
		} else {
			__entry->n_mem_attr_entries = 0;
		}
		/* NOTE(review): handle is dereferenced unconditionally, unlike the
		 * descriptor pointers above — callers presumably always pass a
		 * valid pointer; confirm against all call sites.
		 */
		__entry->handle = *handle;
		__entry->map_vmid = map_vmid;
		__entry->trans_type = trans_type;
	),

	TP_printk("mem_type = %s flags = 0x%x label = %u\t\t"
		  "acl_entries = %u acl_arr = %s\t\t"
		  "sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t"
		  "mem_attr_entries = %u mem_attr_attr = %s mem_attr_vmid = %s\t\t"
		  "handle = %u map_vmid = 0x%x trans_type = %s",
		  __print_symbolic(__entry->mem_type,
				   { 0, "Normal Memory" },
				   { 1, "IO Memory" }),
		  __entry->flags,
		  __entry->label,
		  __entry->n_acl_entries,
		  (__entry->n_acl_entries
			? __print_acl_arr(p, __get_dynamic_array(acl_perm_arr),
					  __get_dynamic_array(acl_vmid_arr), __entry->n_acl_entries)
			: "N/A"),
		  __entry->n_sgl_entries,
		  (__entry->n_sgl_entries
			? __print_array(__get_dynamic_array(sgl_ipa_base_arr),
					__entry->sgl_entries_to_print, sizeof(u64))
			: "N/A"),
		  (__entry->n_sgl_entries
			? __print_array(__get_dynamic_array(sgl_size_arr),
					__entry->sgl_entries_to_print, sizeof(u64))
			: "N/A"),
		  __entry->n_mem_attr_entries,
		  (__entry->n_mem_attr_entries
			? __print_array(__get_dynamic_array(mem_attr_attr_arr),
					__entry->n_mem_attr_entries, sizeof(u16))
			: "N/A"),
		  (__entry->n_mem_attr_entries
			? __print_array(__get_dynamic_array(mem_attr_vmid_arr),
					__entry->n_mem_attr_entries, sizeof(u16))
			: "N/A"),
		  __entry->handle, __entry->map_vmid,
		  __print_symbolic(__entry->trans_type,
				   { 0, "Donate" },
				   { 1, "Lend" },
				   { 2, "Share" })
	)
);
/* Instantiate the shared class for the ACCEPT memparcel operation. */
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_accept,

	TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
		 struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
		 struct gh_mem_attr_desc *mem_attr_desc,
		 gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),

	TP_ARGS(mem_type, flags, label,
		acl_desc, sgl_desc,
		mem_attr_desc,
		handle, map_vmid, trans_type)
);
/* Instantiate the shared class for the DONATE memparcel operation. */
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_donate,

	TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
		 struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
		 struct gh_mem_attr_desc *mem_attr_desc,
		 gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),

	TP_ARGS(mem_type, flags, label,
		acl_desc, sgl_desc,
		mem_attr_desc,
		handle, map_vmid, trans_type)
);
/* Instantiate the shared class for the LEND memparcel operation. */
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_lend,

	TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
		 struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
		 struct gh_mem_attr_desc *mem_attr_desc,
		 gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),

	TP_ARGS(mem_type, flags, label,
		acl_desc, sgl_desc,
		mem_attr_desc,
		handle, map_vmid, trans_type)
);
/* Instantiate the shared class for the SHARE memparcel operation. */
DEFINE_EVENT(gh_rm_mem_accept_donate_lend_share, gh_rm_mem_share,

	TP_PROTO(u8 mem_type, u8 flags, gh_label_t label,
		 struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
		 struct gh_mem_attr_desc *mem_attr_desc,
		 gh_memparcel_handle_t *handle, u16 map_vmid, u8 trans_type),

	TP_ARGS(mem_type, flags, label,
		acl_desc, sgl_desc,
		mem_attr_desc,
		handle, map_vmid, trans_type)
);
  224. TRACE_EVENT(gh_rm_mem_accept_reply,
  225. TP_PROTO(struct gh_sgl_desc *sgl_desc),
  226. TP_ARGS(sgl_desc),
  227. TP_STRUCT__entry(
  228. __field(u16, n_sgl_entries)
  229. __dynamic_array(u64, sgl_ipa_base_arr,
  230. ((sgl_desc != NULL)
  231. ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
  232. ? MAX_ENTRIES_TO_PRINT
  233. : sgl_desc->n_sgl_entries)
  234. : 0))
  235. __dynamic_array(u64, sgl_size_arr,
  236. ((sgl_desc != NULL)
  237. ? (sgl_desc->n_sgl_entries > MAX_ENTRIES_TO_PRINT
  238. ? MAX_ENTRIES_TO_PRINT
  239. : sgl_desc->n_sgl_entries)
  240. : 0))
  241. __field(int, sgl_entries_to_print)
  242. __field(bool, is_error)
  243. ),
  244. TP_fast_assign(
  245. unsigned int i;
  246. u64 *sgl_ipa_base_arr_ptr = __get_dynamic_array(sgl_ipa_base_arr);
  247. u64 *sgl_size_arr_ptr = __get_dynamic_array(sgl_size_arr);
  248. __entry->is_error = IS_ERR(sgl_desc);
  249. if (sgl_desc != NULL && __entry->is_error == false) {
  250. __entry->n_sgl_entries = sgl_desc->n_sgl_entries;
  251. __entry->sgl_entries_to_print =
  252. __entry->n_sgl_entries > MAX_ENTRIES_TO_PRINT
  253. ? MAX_ENTRIES_TO_PRINT
  254. : __entry->n_sgl_entries;
  255. for (i = 0; i < __entry->sgl_entries_to_print; i++) {
  256. sgl_ipa_base_arr_ptr[i] = sgl_desc->sgl_entries[i].ipa_base;
  257. sgl_size_arr_ptr[i] = sgl_desc->sgl_entries[i].size;
  258. }
  259. } else {
  260. __entry->n_sgl_entries = 0;
  261. __entry->sgl_entries_to_print = 0;
  262. }
  263. ),
  264. TP_printk("sgl_entries = %u sgl_ipa_base = %s sgl_size = %s\t\t",
  265. __entry->n_sgl_entries,
  266. ((__entry->n_sgl_entries && __entry->is_error == false)
  267. ? __print_array(__get_dynamic_array(sgl_ipa_base_arr),
  268. __entry->sgl_entries_to_print, sizeof(u64))
  269. : "N/A"),
  270. ((__entry->n_sgl_entries && __entry->is_error == false)
  271. ? __print_array(__get_dynamic_array(sgl_size_arr),
  272. __entry->sgl_entries_to_print, sizeof(u64))
  273. : "N/A")
  274. )
  275. );
/*
 * Event class shared by gh_rm_mem_release / gh_rm_mem_reclaim: records the
 * memparcel handle being released/reclaimed and the request flags.
 */
DECLARE_EVENT_CLASS(gh_rm_mem_release_reclaim,

	TP_PROTO(gh_memparcel_handle_t handle, u8 flags),

	TP_ARGS(handle, flags),

	TP_STRUCT__entry(
		__field(gh_memparcel_handle_t, handle)
		__field(u8, flags)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->flags = flags;
	),

	/* NOTE(review): "handle_s" looks like a typo for "handle" (compare
	 * gh_rm_mem_call_return below).  Left as-is because trace format
	 * strings are visible to userspace parsers — confirm before changing.
	 */
	TP_printk("handle_s = %u flags = 0x%x",
		  __entry->handle,
		  __entry->flags
	)
);
/* Instantiate the shared class for the RELEASE memparcel operation. */
DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_release,

	TP_PROTO(gh_memparcel_handle_t handle, u8 flags),

	TP_ARGS(handle, flags)
);
/* Instantiate the shared class for the RECLAIM memparcel operation. */
DEFINE_EVENT(gh_rm_mem_release_reclaim, gh_rm_mem_reclaim,

	TP_PROTO(gh_memparcel_handle_t handle, u8 flags),

	TP_ARGS(handle, flags)
);
/* Record the return value of a resource-manager memory call for a handle. */
TRACE_EVENT(gh_rm_mem_call_return,

	TP_PROTO(gh_memparcel_handle_t handle, int return_val),

	TP_ARGS(handle, return_val),

	TP_STRUCT__entry(
		__field(gh_memparcel_handle_t, handle)
		__field(int, return_val)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->return_val = return_val;
	),

	TP_printk("handle = %u, return_value = %d", __entry->handle, __entry->return_val)
);
/*
 * Record a MEM_NOTIFY request: the memparcel handle, flags, info tag, and
 * the list of VMIDs being notified.  vmid_desc may be NULL, in which case
 * zero entries are recorded.
 */
TRACE_EVENT(gh_rm_mem_notify,

	TP_PROTO(gh_memparcel_handle_t handle, u8 flags, gh_label_t mem_info_tag,
		 struct gh_notify_vmid_desc *vmid_desc),

	TP_ARGS(handle, flags, mem_info_tag, vmid_desc),

	TP_STRUCT__entry(
		__field(gh_memparcel_handle_t, handle)
		__field(u8, flags)
		__field(gh_label_t, mem_info_tag)
		__field(u16, n_vmid_entries)
		__dynamic_array(u16, entry_vmid_arr,
			((vmid_desc != NULL) ? vmid_desc->n_vmid_entries : 0))
	),

	TP_fast_assign(
		unsigned int i;
		/* vmid_desc: copy every notified VMID */
		u16 *entry_vmid_arr_ptr = __get_dynamic_array(entry_vmid_arr);

		__entry->handle = handle;
		__entry->flags = flags;
		__entry->mem_info_tag = mem_info_tag;
		if (vmid_desc != NULL) {
			__entry->n_vmid_entries = vmid_desc->n_vmid_entries;
			for (i = 0; i < __entry->n_vmid_entries; i++)
				entry_vmid_arr_ptr[i] = vmid_desc->vmid_entries[i].vmid;
		} else {
			__entry->n_vmid_entries = 0;
		}
	),

	TP_printk("handle = %u flags = 0x%x mem_info_tag = %u\t\t"
		  "vmid_entries = %u entry_vmid_arr = %s",
		  __entry->handle,
		  __entry->flags,
		  __entry->mem_info_tag,
		  __entry->n_vmid_entries,
		  (__entry->n_vmid_entries
			? __print_array(__get_dynamic_array(entry_vmid_arr),
					__entry->n_vmid_entries, sizeof(u16))
			: "N/A")
	)
);
  352. #endif /* _TRACE_GUNYAH_H */
  353. /* This part must be outside protection */
  354. #include <trace/define_trace.h>