/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
  6. #if !defined(_MSM_CVP_EVENTS_H_) || defined(TRACE_HEADER_MULTI_READ)
  7. #define _MSM_CVP_EVENTS_H_
  8. #include <linux/types.h>
  9. #include <linux/tracepoint.h>
  10. #undef TRACE_SYSTEM
  11. #define TRACE_SYSTEM msm_cvp
  12. #undef TRACE_INCLUDE_FILE
  13. #define TRACE_INCLUDE_FILE msm_cvp_events
  14. // Since Chrome supports to parse the event “tracing_mark_write” by default
  15. // so we can re-use this to display your own events in Chrome
  16. // enable command as below:
  17. // adb shell "echo 1 > /sys/kernel/tracing/events/msm_cvp/tracing_mark_write/enable"
  18. TRACE_EVENT(tracing_mark_write,
  19. TP_PROTO(int pid, const char *name, bool trace_begin),
  20. TP_ARGS(pid, name, trace_begin),
  21. TP_STRUCT__entry(
  22. __field(int, pid)
  23. __string(trace_name, name)
  24. __field(bool, trace_begin)
  25. ),
  26. TP_fast_assign(
  27. __entry->pid = pid;
  28. __assign_str(trace_name, name);
  29. __entry->trace_begin = trace_begin;
  30. ),
  31. TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
  32. __entry->pid, __get_str(trace_name))
  33. )
  34. #define CVPKERNEL_ATRACE_END(name) \
  35. trace_tracing_mark_write(current->tgid, name, 0)
  36. #define CVPKERNEL_ATRACE_BEGIN(name) \
  37. trace_tracing_mark_write(current->tgid, name, 1)
  38. DECLARE_EVENT_CLASS(msm_v4l2_cvp,
  39. TP_PROTO(char *dummy),
  40. TP_ARGS(dummy),
  41. TP_STRUCT__entry(
  42. __field(char *, dummy)
  43. ),
  44. TP_fast_assign(
  45. __entry->dummy = dummy;
  46. ),
  47. TP_printk("%s", __entry->dummy)
  48. );
  49. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_start,
  50. TP_PROTO(char *dummy),
  51. TP_ARGS(dummy)
  52. );
  53. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_end,
  54. TP_PROTO(char *dummy),
  55. TP_ARGS(dummy)
  56. );
  57. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_start,
  58. TP_PROTO(char *dummy),
  59. TP_ARGS(dummy)
  60. );
  61. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_end,
  62. TP_PROTO(char *dummy),
  63. TP_ARGS(dummy)
  64. );
  65. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_start,
  66. TP_PROTO(char *dummy),
  67. TP_ARGS(dummy)
  68. );
  69. DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_end,
  70. TP_PROTO(char *dummy),
  71. TP_ARGS(dummy)
  72. );
  73. DECLARE_EVENT_CLASS(msm_cvp_common,
  74. TP_PROTO(void *instp, int old_state, int new_state),
  75. TP_ARGS(instp, old_state, new_state),
  76. TP_STRUCT__entry(
  77. __field(void *, instp)
  78. __field(int, old_state)
  79. __field(int, new_state)
  80. ),
  81. TP_fast_assign(
  82. __entry->instp = instp;
  83. __entry->old_state = old_state;
  84. __entry->new_state = new_state;
  85. ),
  86. TP_printk("Moved inst: %p from 0x%x to 0x%x",
  87. __entry->instp,
  88. __entry->old_state,
  89. __entry->new_state)
  90. );
  91. DEFINE_EVENT(msm_cvp_common, msm_cvp_common_state_change,
  92. TP_PROTO(void *instp, int old_state, int new_state),
  93. TP_ARGS(instp, old_state, new_state)
  94. );
  95. DECLARE_EVENT_CLASS(cvp_venus_hfi_var,
  96. TP_PROTO(u32 cp_start, u32 cp_size,
  97. u32 cp_nonpixel_start, u32 cp_nonpixel_size),
  98. TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size),
  99. TP_STRUCT__entry(
  100. __field(u32, cp_start)
  101. __field(u32, cp_size)
  102. __field(u32, cp_nonpixel_start)
  103. __field(u32, cp_nonpixel_size)
  104. ),
  105. TP_fast_assign(
  106. __entry->cp_start = cp_start;
  107. __entry->cp_size = cp_size;
  108. __entry->cp_nonpixel_start = cp_nonpixel_start;
  109. __entry->cp_nonpixel_size = cp_nonpixel_size;
  110. ),
  111. TP_printk(
  112. "TZBSP_MEM_PROTECT_VIDEO_VAR done, cp_start : 0x%x, cp_size : 0x%x, cp_nonpixel_start : 0x%x, cp_nonpixel_size : 0x%x",
  113. __entry->cp_start,
  114. __entry->cp_size,
  115. __entry->cp_nonpixel_start,
  116. __entry->cp_nonpixel_size)
  117. );
  118. DEFINE_EVENT(cvp_venus_hfi_var, cvp_venus_hfi_var_done,
  119. TP_PROTO(u32 cp_start, u32 cp_size,
  120. u32 cp_nonpixel_start, u32 cp_nonpixel_size),
  121. TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size)
  122. );
  123. DECLARE_EVENT_CLASS(msm_v4l2_cvp_buffer_events,
  124. TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
  125. u32 alloc_len, u32 filled_len, u32 offset),
  126. TP_ARGS(event_type, device_addr, timestamp, alloc_len,
  127. filled_len, offset),
  128. TP_STRUCT__entry(
  129. __field(char *, event_type)
  130. __field(u32, device_addr)
  131. __field(int64_t, timestamp)
  132. __field(u32, alloc_len)
  133. __field(u32, filled_len)
  134. __field(u32, offset)
  135. ),
  136. TP_fast_assign(
  137. __entry->event_type = event_type;
  138. __entry->device_addr = device_addr;
  139. __entry->timestamp = timestamp;
  140. __entry->alloc_len = alloc_len;
  141. __entry->filled_len = filled_len;
  142. __entry->offset = offset;
  143. ),
  144. TP_printk(
  145. "%s, device_addr : 0x%x, timestamp : %lld, alloc_len : 0x%x, filled_len : 0x%x, offset : 0x%x",
  146. __entry->event_type,
  147. __entry->device_addr,
  148. __entry->timestamp,
  149. __entry->alloc_len,
  150. __entry->filled_len,
  151. __entry->offset)
  152. );
  153. DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_start,
  154. TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
  155. u32 alloc_len, u32 filled_len, u32 offset),
  156. TP_ARGS(event_type, device_addr, timestamp, alloc_len,
  157. filled_len, offset)
  158. );
  159. DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_end,
  160. TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
  161. u32 alloc_len, u32 filled_len, u32 offset),
  162. TP_ARGS(event_type, device_addr, timestamp, alloc_len,
  163. filled_len, offset)
  164. );
  165. DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_dma_ops,
  166. TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
  167. size_t size, u32 align, u32 flags, int map_kernel),
  168. TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
  169. flags, map_kernel),
  170. TP_STRUCT__entry(
  171. __field(char *, buffer_op)
  172. __field(u32, buffer_type)
  173. __field(u32, heap_mask)
  174. __field(u32, size)
  175. __field(u32, align)
  176. __field(u32, flags)
  177. __field(int, map_kernel)
  178. ),
  179. TP_fast_assign(
  180. __entry->buffer_op = buffer_op;
  181. __entry->buffer_type = buffer_type;
  182. __entry->heap_mask = heap_mask;
  183. __entry->size = size;
  184. __entry->align = align;
  185. __entry->flags = flags;
  186. __entry->map_kernel = map_kernel;
  187. ),
  188. TP_printk(
  189. "%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
  190. __entry->buffer_op,
  191. __entry->buffer_type,
  192. __entry->heap_mask,
  193. __entry->size,
  194. __entry->align,
  195. __entry->flags,
  196. __entry->map_kernel)
  197. );
  198. DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_start,
  199. TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
  200. size_t size, u32 align, u32 flags, int map_kernel),
  201. TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
  202. flags, map_kernel)
  203. );
  204. DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_end,
  205. TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
  206. size_t size, u32 align, u32 flags, int map_kernel),
  207. TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
  208. flags, map_kernel)
  209. );
  210. DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_iommu_ops,
  211. TP_PROTO(char *buffer_op, int domain_num, int partition_num,
  212. unsigned long align, unsigned long iova,
  213. unsigned long buffer_size),
  214. TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size),
  215. TP_STRUCT__entry(
  216. __field(char *, buffer_op)
  217. __field(int, domain_num)
  218. __field(int, partition_num)
  219. __field(unsigned long, align)
  220. __field(unsigned long, iova)
  221. __field(unsigned long, buffer_size)
  222. ),
  223. TP_fast_assign(
  224. __entry->buffer_op = buffer_op;
  225. __entry->domain_num = domain_num;
  226. __entry->partition_num = partition_num;
  227. __entry->align = align;
  228. __entry->iova = iova;
  229. __entry->buffer_size = buffer_size;
  230. ),
  231. TP_printk(
  232. "%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx",
  233. __entry->buffer_op,
  234. __entry->domain_num,
  235. __entry->partition_num,
  236. __entry->align,
  237. __entry->iova,
  238. __entry->buffer_size)
  239. );
  240. DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_start,
  241. TP_PROTO(char *buffer_op, int domain_num, int partition_num,
  242. unsigned long align, unsigned long iova,
  243. unsigned long buffer_size),
  244. TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
  245. );
  246. DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_end,
  247. TP_PROTO(char *buffer_op, int domain_num, int partition_num,
  248. unsigned long align, unsigned long iova,
  249. unsigned long buffer_size),
  250. TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
  251. );
  252. DECLARE_EVENT_CLASS(msm_cvp_perf,
  253. TP_PROTO(const char *name, unsigned long value),
  254. TP_ARGS(name, value),
  255. TP_STRUCT__entry(
  256. __field(const char *, name)
  257. __field(unsigned long, value)
  258. ),
  259. TP_fast_assign(
  260. __entry->name = name;
  261. __entry->value = value;
  262. ),
  263. TP_printk("%s %lu", __entry->name, __entry->value)
  264. );
  265. DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_clock_scale,
  266. TP_PROTO(const char *clock_name, unsigned long frequency),
  267. TP_ARGS(clock_name, frequency)
  268. );
  269. DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_bus_vote,
  270. TP_PROTO(const char *governor_mode, unsigned long ab),
  271. TP_ARGS(governor_mode, ab)
  272. );
  273. #endif
  274. #undef TRACE_INCLUDE_PATH
  275. #define TRACE_INCLUDE_PATH .
  276. #include <trace/define_trace.h>