trace-mem-buf.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mem_buf

#if !defined(_TRACE_MEM_BUF_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MEM_BUF_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/mem-buf.h>

#include "mem-buf-msgq.h"

#ifdef CREATE_TRACE_POINTS
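/*
 * The helpers below exist only in the translation unit that defines
 * CREATE_TRACE_POINTS. They flatten the gh_* ACL and SG-list descriptors
 * into plain arrays so TP_fast_assign() can copy them into the events'
 * dynamic arrays, and map the mem_buf enum values to printable strings.
 */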
static void __maybe_unused gh_acl_to_vmid_perms(struct gh_acl_desc *acl_desc,
						u16 *vmids, u8 *perms)
{
	unsigned int i;

	for (i = 0; i < acl_desc->n_acl_entries; i++) {
		vmids[i] = acl_desc->acl_entries[i].vmid;
		perms[i] = acl_desc->acl_entries[i].perms;
	}
}

static void __maybe_unused
gh_sgl_to_ipa_bases_sizes(struct gh_sgl_desc *sgl_desc,
			  u64 *ipa_bases, u64 *sizes)
{
	unsigned int i;

	for (i = 0; i < sgl_desc->n_sgl_entries; i++) {
		ipa_bases[i] = sgl_desc->sgl_entries[i].ipa_base;
		sizes[i] = sgl_desc->sgl_entries[i].size;
	}
}

static char __maybe_unused *mem_type_to_str(enum mem_buf_mem_type type)
{
	if (type == MEM_BUF_ION_MEM_TYPE)
		return "ION_MEM_TYPE";

	return NULL;
}

static char __maybe_unused *msg_type_to_str(enum mem_buf_msg_type type)
{
	if (type == MEM_BUF_ALLOC_REQ)
		return "MEM_BUF_ALLOC_REQ";
	else if (type == MEM_BUF_ALLOC_RESP)
		return "MEM_BUF_ALLOC_RESP";
	else if (type == MEM_BUF_ALLOC_RELINQUISH)
		return "MEM_BUF_ALLOC_RELINQUISH";
	else if (type == MEM_BUF_ALLOC_RELINQUISH_RESP)
		return "MEM_BUF_ALLOC_RELINQUISH_RESP";

	return NULL;
}
#endif /* CREATE_TRACE_POINTS */
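/*
 * Trace events for the mem_buf allocation/relinquish message flow.
 *
 * Illustrative emitter-side usage (a sketch only, not part of this header;
 * the real call sites live in the mem_buf driver sources):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace-mem-buf.h"
 *	...
 *	trace_mem_buf_alloc_info(size, src_mem_type, dst_mem_type, acl_desc);
 */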
TRACE_EVENT(mem_buf_alloc_info,

	TP_PROTO(size_t size, enum mem_buf_mem_type src_mem_type,
		 enum mem_buf_mem_type dst_mem_type,
		 struct gh_acl_desc *acl_desc),

	TP_ARGS(size, src_mem_type, dst_mem_type, acl_desc),

	TP_STRUCT__entry(
		__field(size_t, size)
		__field(u32, nr_acl_entries)
		__string(src_type, mem_type_to_str(src_mem_type))
		__string(dst_type, mem_type_to_str(dst_mem_type))
		__dynamic_array(u16, vmids, acl_desc->n_acl_entries)
		__dynamic_array(u8, perms, acl_desc->n_acl_entries)
	),

	TP_fast_assign(
		__entry->size = size;
		__assign_str(src_type, mem_type_to_str(src_mem_type));
		__assign_str(dst_type, mem_type_to_str(dst_mem_type));
		__entry->nr_acl_entries = acl_desc->n_acl_entries;
		gh_acl_to_vmid_perms(acl_desc, __get_dynamic_array(vmids),
				     __get_dynamic_array(perms));
	),

	TP_printk("size: 0x%lx src mem type: %s dst mem type: %s nr ACL entries: %d ACL VMIDs: %s ACL Perms: %s",
		  __entry->size, __get_str(src_type), __get_str(dst_type),
		  __entry->nr_acl_entries,
		  __print_array(__get_dynamic_array(vmids),
				__entry->nr_acl_entries, sizeof(u16)),
		  __print_array(__get_dynamic_array(perms),
				__entry->nr_acl_entries, sizeof(u8))
	)
);
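/*
 * Allocation-request messages share one event layout; send_alloc_req and
 * receive_alloc_req below record the same fields on the sending and
 * receiving sides respectively.
 */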
DECLARE_EVENT_CLASS(alloc_req_msg_class,

	TP_PROTO(struct mem_buf_alloc_req *req),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, txn_id)
		__string(msg_type, msg_type_to_str(req->hdr.msg_type))
		__field(u64, size)
		__string(src_type, mem_type_to_str(req->src_mem_type))
		__field(u32, nr_acl_entries)
		__dynamic_array(u16, vmids, req->acl_desc.n_acl_entries)
		__dynamic_array(u8, perms, req->acl_desc.n_acl_entries)
	),

	TP_fast_assign(
		__entry->txn_id = req->hdr.txn_id;
		__assign_str(msg_type, msg_type_to_str(req->hdr.msg_type));
		__entry->size = req->size;
		__assign_str(src_type, mem_type_to_str(req->src_mem_type));
		__entry->nr_acl_entries = req->acl_desc.n_acl_entries;
		gh_acl_to_vmid_perms(&req->acl_desc, __get_dynamic_array(vmids),
				     __get_dynamic_array(perms));
	),

	TP_printk("txn_id: %d msg_type: %s alloc_sz: 0x%lx src_mem_type: %s nr ACL entries: %d ACL VMIDs: %s ACL Perms: %s",
		  __entry->txn_id, __get_str(msg_type), __entry->size,
		  __get_str(src_type), __entry->nr_acl_entries,
		  __print_array(__get_dynamic_array(vmids),
				__entry->nr_acl_entries, sizeof(u16)),
		  __print_array(__get_dynamic_array(perms),
				__entry->nr_acl_entries, sizeof(u8))
	)
);

DEFINE_EVENT(alloc_req_msg_class, send_alloc_req,

	TP_PROTO(struct mem_buf_alloc_req *req),

	TP_ARGS(req)
);

DEFINE_EVENT(alloc_req_msg_class, receive_alloc_req,

	TP_PROTO(struct mem_buf_alloc_req *req),

	TP_ARGS(req)
);
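/*
 * Relinquish-request messages: log the message type, the memparcel handle
 * being handed back, and the transaction id, on both send and receive paths.
 */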
DECLARE_EVENT_CLASS(relinquish_req_msg_class,

	TP_PROTO(struct mem_buf_alloc_relinquish *rel_req),

	TP_ARGS(rel_req),

	TP_STRUCT__entry(
		__string(msg_type, msg_type_to_str(rel_req->hdr.msg_type))
		__field(gh_memparcel_handle_t, hdl)
		__field(u32, txn_id)
	),

	TP_fast_assign(
		__assign_str(msg_type, msg_type_to_str(rel_req->hdr.msg_type));
		__entry->hdl = rel_req->hdl;
		__entry->txn_id = rel_req->hdr.txn_id;
	),

	TP_printk("msg_type: %s memparcel_hdl: 0x%x txn_id: 0x%x",
		  __get_str(msg_type), __entry->hdl, __entry->txn_id)
);

DEFINE_EVENT(relinquish_req_msg_class, send_relinquish_msg,

	TP_PROTO(struct mem_buf_alloc_relinquish *rel_req),

	TP_ARGS(rel_req)
);

DEFINE_EVENT(relinquish_req_msg_class, receive_relinquish_msg,

	TP_PROTO(struct mem_buf_alloc_relinquish *rel_req),

	TP_ARGS(rel_req)
);
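/*
 * Allocation-response messages: log the transaction id, message type,
 * return code, and the memparcel handle carried in the response.
 */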
DECLARE_EVENT_CLASS(alloc_resp_class,

	TP_PROTO(struct mem_buf_alloc_resp *resp),

	TP_ARGS(resp),

	TP_STRUCT__entry(
		__field(u32, txn_id)
		__string(msg_type, msg_type_to_str(resp->hdr.msg_type))
		__field(s32, ret)
		__field(gh_memparcel_handle_t, hdl)
	),

	TP_fast_assign(
		__entry->txn_id = resp->hdr.txn_id;
		__assign_str(msg_type, msg_type_to_str(resp->hdr.msg_type));
		__entry->ret = resp->ret;
		__entry->hdl = resp->hdl;
	),

	TP_printk("txn_id: %d msg_type: %s ret: %d memparcel_hdl: 0x%x",
		  __entry->txn_id, __get_str(msg_type), __entry->ret,
		  __entry->hdl
	)
);

DEFINE_EVENT(alloc_resp_class, send_alloc_resp_msg,

	TP_PROTO(struct mem_buf_alloc_resp *resp),

	TP_ARGS(resp)
);

DEFINE_EVENT(alloc_resp_class, receive_alloc_resp_msg,

	TP_PROTO(struct mem_buf_alloc_resp *resp),

	TP_ARGS(resp)
);
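/*
 * Relinquish-response messages: only the transaction id and message type
 * are recorded here.
 */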
DECLARE_EVENT_CLASS(relinquish_resp_class,

	TP_PROTO(struct mem_buf_alloc_relinquish *resp),

	TP_ARGS(resp),

	TP_STRUCT__entry(
		__field(u32, txn_id)
		__string(msg_type, msg_type_to_str(resp->hdr.msg_type))
	),

	TP_fast_assign(
		__entry->txn_id = resp->hdr.txn_id;
		__assign_str(msg_type, msg_type_to_str(resp->hdr.msg_type));
	),

	TP_printk("txn_id: %d msg_type: %s",
		  __entry->txn_id, __get_str(msg_type)
	)
);

DEFINE_EVENT(relinquish_resp_class, send_relinquish_resp_msg,

	TP_PROTO(struct mem_buf_alloc_relinquish *resp),

	TP_ARGS(resp)
);

DEFINE_EVENT(relinquish_resp_class, receive_relinquish_resp_msg,

	TP_PROTO(struct mem_buf_alloc_relinquish *resp),

	TP_ARGS(resp)
);
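/*
 * lookup_sgl: dumps the IPA base and size of every SG-list entry associated
 * with a memparcel handle, along with the return code passed in by the
 * caller.
 */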
TRACE_EVENT(lookup_sgl,

	TP_PROTO(struct gh_sgl_desc *sgl_desc, int ret,
		 gh_memparcel_handle_t hdl),

	TP_ARGS(sgl_desc, ret, hdl),

	TP_STRUCT__entry(
		__field(u16, nr_sgl_entries)
		__dynamic_array(u64, ipa_bases, sgl_desc->n_sgl_entries)
		__dynamic_array(u64, sizes, sgl_desc->n_sgl_entries)
		__field(int, ret)
		__field(gh_memparcel_handle_t, hdl)
	),

	TP_fast_assign(
		__entry->nr_sgl_entries = sgl_desc->n_sgl_entries;
		gh_sgl_to_ipa_bases_sizes(sgl_desc,
					  __get_dynamic_array(ipa_bases),
					  __get_dynamic_array(sizes));
		__entry->ret = ret;
		__entry->hdl = hdl;
	),

	TP_printk("SGL entries: %d SGL IPA bases: %s SGL sizes: %s ret: %d memparcel_hdl: 0x%x",
		  __entry->nr_sgl_entries,
		  __print_array(__get_dynamic_array(ipa_bases),
				__entry->nr_sgl_entries, sizeof(u64)),
		  __print_array(__get_dynamic_array(sizes),
				__entry->nr_sgl_entries, sizeof(u64)),
		  __entry->ret, __entry->hdl
	)
);
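/*
 * map_mem_s2: emitted after a successful MEM_ACCEPT (per the format string
 * below), recording the memparcel handle and the accepted SG-list of IPA
 * ranges.
 */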
TRACE_EVENT(map_mem_s2,

	TP_PROTO(gh_memparcel_handle_t hdl, struct gh_sgl_desc *sgl_desc),

	TP_ARGS(hdl, sgl_desc),

	TP_STRUCT__entry(
		__field(gh_memparcel_handle_t, hdl)
		__field(u16, nr_sgl_entries)
		__dynamic_array(u64, ipa_bases, sgl_desc->n_sgl_entries)
		__dynamic_array(u64, sizes, sgl_desc->n_sgl_entries)
	),

	TP_fast_assign(
		__entry->hdl = hdl;
		__entry->nr_sgl_entries = sgl_desc->n_sgl_entries;
		gh_sgl_to_ipa_bases_sizes(sgl_desc,
					  __get_dynamic_array(ipa_bases),
					  __get_dynamic_array(sizes));
	),

	TP_printk("MEM_ACCEPT successful memparcel hdl: 0x%x SGL entries: %d SGL IPA bases: %s SGL sizes: %s",
		  __entry->hdl, __entry->nr_sgl_entries,
		  __print_array(__get_dynamic_array(ipa_bases),
				__entry->nr_sgl_entries, sizeof(u64)),
		  __print_array(__get_dynamic_array(sizes),
				__entry->nr_sgl_entries, sizeof(u64))
	)
);
#endif /* _TRACE_MEM_BUF_H */
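/*
 * TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell define_trace.h where to
 * find this header (minus the .h suffix) when it re-reads it to generate
 * the tracepoint definitions, which is why this block sits outside the
 * multi-read guard above.
 */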
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../drivers/soc/qcom/mem_buf/
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace-mem-buf

/* This part must be outside protection */
#include <trace/define_trace.h>