/* include/trace/events/kvm.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
  3. #define _TRACE_KVM_MAIN_H
  4. #include <linux/tracepoint.h>
  5. #undef TRACE_SYSTEM
  6. #define TRACE_SYSTEM kvm
  7. #define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
  8. #define kvm_trace_exit_reason \
  9. ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
  10. ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
  11. ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
  12. ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
  13. ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
  14. ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
  15. ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \
  16. ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
  17. TRACE_EVENT(kvm_userspace_exit,
  18. TP_PROTO(__u32 reason, int errno),
  19. TP_ARGS(reason, errno),
  20. TP_STRUCT__entry(
  21. __field( __u32, reason )
  22. __field( int, errno )
  23. ),
  24. TP_fast_assign(
  25. __entry->reason = reason;
  26. __entry->errno = errno;
  27. ),
  28. TP_printk("reason %s (%d)",
  29. __entry->errno < 0 ?
  30. (__entry->errno == -EINTR ? "restart" : "error") :
  31. __print_symbolic(__entry->reason, kvm_trace_exit_reason),
  32. __entry->errno < 0 ? -__entry->errno : __entry->reason)
  33. );
  34. TRACE_EVENT(kvm_vcpu_wakeup,
  35. TP_PROTO(__u64 ns, bool waited, bool valid),
  36. TP_ARGS(ns, waited, valid),
  37. TP_STRUCT__entry(
  38. __field( __u64, ns )
  39. __field( bool, waited )
  40. __field( bool, valid )
  41. ),
  42. TP_fast_assign(
  43. __entry->ns = ns;
  44. __entry->waited = waited;
  45. __entry->valid = valid;
  46. ),
  47. TP_printk("%s time %lld ns, polling %s",
  48. __entry->waited ? "wait" : "poll",
  49. __entry->ns,
  50. __entry->valid ? "valid" : "invalid")
  51. );
  52. #if defined(CONFIG_HAVE_KVM_IRQFD)
  53. TRACE_EVENT(kvm_set_irq,
  54. TP_PROTO(unsigned int gsi, int level, int irq_source_id),
  55. TP_ARGS(gsi, level, irq_source_id),
  56. TP_STRUCT__entry(
  57. __field( unsigned int, gsi )
  58. __field( int, level )
  59. __field( int, irq_source_id )
  60. ),
  61. TP_fast_assign(
  62. __entry->gsi = gsi;
  63. __entry->level = level;
  64. __entry->irq_source_id = irq_source_id;
  65. ),
  66. TP_printk("gsi %u level %d source %d",
  67. __entry->gsi, __entry->level, __entry->irq_source_id)
  68. );
  69. #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
  70. #if defined(__KVM_HAVE_IOAPIC)
  71. #define kvm_deliver_mode \
  72. {0x0, "Fixed"}, \
  73. {0x1, "LowPrio"}, \
  74. {0x2, "SMI"}, \
  75. {0x3, "Res3"}, \
  76. {0x4, "NMI"}, \
  77. {0x5, "INIT"}, \
  78. {0x6, "SIPI"}, \
  79. {0x7, "ExtINT"}
  80. TRACE_EVENT(kvm_ioapic_set_irq,
  81. TP_PROTO(__u64 e, int pin, bool coalesced),
  82. TP_ARGS(e, pin, coalesced),
  83. TP_STRUCT__entry(
  84. __field( __u64, e )
  85. __field( int, pin )
  86. __field( bool, coalesced )
  87. ),
  88. TP_fast_assign(
  89. __entry->e = e;
  90. __entry->pin = pin;
  91. __entry->coalesced = coalesced;
  92. ),
  93. TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
  94. __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
  95. __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
  96. (__entry->e & (1<<11)) ? "logical" : "physical",
  97. (__entry->e & (1<<15)) ? "level" : "edge",
  98. (__entry->e & (1<<16)) ? "|masked" : "",
  99. __entry->coalesced ? " (coalesced)" : "")
  100. );
  101. TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
  102. TP_PROTO(__u64 e),
  103. TP_ARGS(e),
  104. TP_STRUCT__entry(
  105. __field( __u64, e )
  106. ),
  107. TP_fast_assign(
  108. __entry->e = e;
  109. ),
  110. TP_printk("dst %x vec %u (%s|%s|%s%s)",
  111. (u8)(__entry->e >> 56), (u8)__entry->e,
  112. __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
  113. (__entry->e & (1<<11)) ? "logical" : "physical",
  114. (__entry->e & (1<<15)) ? "level" : "edge",
  115. (__entry->e & (1<<16)) ? "|masked" : "")
  116. );
  117. TRACE_EVENT(kvm_msi_set_irq,
  118. TP_PROTO(__u64 address, __u64 data),
  119. TP_ARGS(address, data),
  120. TP_STRUCT__entry(
  121. __field( __u64, address )
  122. __field( __u64, data )
  123. ),
  124. TP_fast_assign(
  125. __entry->address = address;
  126. __entry->data = data;
  127. ),
  128. TP_printk("dst %llx vec %u (%s|%s|%s%s)",
  129. (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
  130. (u8)__entry->data,
  131. __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
  132. (__entry->address & (1<<2)) ? "logical" : "physical",
  133. (__entry->data & (1<<15)) ? "level" : "edge",
  134. (__entry->address & (1<<3)) ? "|rh" : "")
  135. );
  136. #define kvm_irqchips \
  137. {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
  138. {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
  139. {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
  140. #endif /* defined(__KVM_HAVE_IOAPIC) */
  141. #if defined(CONFIG_HAVE_KVM_IRQFD)
  142. #ifdef kvm_irqchips
  143. #define kvm_ack_irq_string "irqchip %s pin %u"
  144. #define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
  145. #else
  146. #define kvm_ack_irq_string "irqchip %d pin %u"
  147. #define kvm_ack_irq_parm __entry->irqchip, __entry->pin
  148. #endif
  149. TRACE_EVENT(kvm_ack_irq,
  150. TP_PROTO(unsigned int irqchip, unsigned int pin),
  151. TP_ARGS(irqchip, pin),
  152. TP_STRUCT__entry(
  153. __field( unsigned int, irqchip )
  154. __field( unsigned int, pin )
  155. ),
  156. TP_fast_assign(
  157. __entry->irqchip = irqchip;
  158. __entry->pin = pin;
  159. ),
  160. TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
  161. );
  162. #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
  163. #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
  164. #define KVM_TRACE_MMIO_READ 1
  165. #define KVM_TRACE_MMIO_WRITE 2
  166. #define kvm_trace_symbol_mmio \
  167. { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
  168. { KVM_TRACE_MMIO_READ, "read" }, \
  169. { KVM_TRACE_MMIO_WRITE, "write" }
  170. TRACE_EVENT(kvm_mmio,
  171. TP_PROTO(int type, int len, u64 gpa, void *val),
  172. TP_ARGS(type, len, gpa, val),
  173. TP_STRUCT__entry(
  174. __field( u32, type )
  175. __field( u32, len )
  176. __field( u64, gpa )
  177. __field( u64, val )
  178. ),
  179. TP_fast_assign(
  180. __entry->type = type;
  181. __entry->len = len;
  182. __entry->gpa = gpa;
  183. __entry->val = 0;
  184. if (val)
  185. memcpy(&__entry->val, val,
  186. min_t(u32, sizeof(__entry->val), len));
  187. ),
  188. TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
  189. __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
  190. __entry->len, __entry->gpa, __entry->val)
  191. );
  192. #define kvm_fpu_load_symbol \
  193. {0, "unload"}, \
  194. {1, "load"}
  195. TRACE_EVENT(kvm_fpu,
  196. TP_PROTO(int load),
  197. TP_ARGS(load),
  198. TP_STRUCT__entry(
  199. __field( u32, load )
  200. ),
  201. TP_fast_assign(
  202. __entry->load = load;
  203. ),
  204. TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
  205. );
  206. #ifdef CONFIG_KVM_ASYNC_PF
  207. DECLARE_EVENT_CLASS(kvm_async_get_page_class,
  208. TP_PROTO(u64 gva, u64 gfn),
  209. TP_ARGS(gva, gfn),
  210. TP_STRUCT__entry(
  211. __field(__u64, gva)
  212. __field(u64, gfn)
  213. ),
  214. TP_fast_assign(
  215. __entry->gva = gva;
  216. __entry->gfn = gfn;
  217. ),
  218. TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
  219. );
  220. DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
  221. TP_PROTO(u64 gva, u64 gfn),
  222. TP_ARGS(gva, gfn)
  223. );
  224. DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
  225. TP_PROTO(u64 gva, u64 gfn),
  226. TP_ARGS(gva, gfn)
  227. );
  228. DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
  229. TP_PROTO(u64 token, u64 gva),
  230. TP_ARGS(token, gva),
  231. TP_STRUCT__entry(
  232. __field(__u64, token)
  233. __field(__u64, gva)
  234. ),
  235. TP_fast_assign(
  236. __entry->token = token;
  237. __entry->gva = gva;
  238. ),
  239. TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
  240. );
  241. DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
  242. TP_PROTO(u64 token, u64 gva),
  243. TP_ARGS(token, gva)
  244. );
  245. DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
  246. TP_PROTO(u64 token, u64 gva),
  247. TP_ARGS(token, gva)
  248. );
  249. TRACE_EVENT(
  250. kvm_async_pf_completed,
  251. TP_PROTO(unsigned long address, u64 gva),
  252. TP_ARGS(address, gva),
  253. TP_STRUCT__entry(
  254. __field(unsigned long, address)
  255. __field(u64, gva)
  256. ),
  257. TP_fast_assign(
  258. __entry->address = address;
  259. __entry->gva = gva;
  260. ),
  261. TP_printk("gva %#llx address %#lx", __entry->gva,
  262. __entry->address)
  263. );
  264. #endif
  265. TRACE_EVENT(kvm_halt_poll_ns,
  266. TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
  267. unsigned int old),
  268. TP_ARGS(grow, vcpu_id, new, old),
  269. TP_STRUCT__entry(
  270. __field(bool, grow)
  271. __field(unsigned int, vcpu_id)
  272. __field(unsigned int, new)
  273. __field(unsigned int, old)
  274. ),
  275. TP_fast_assign(
  276. __entry->grow = grow;
  277. __entry->vcpu_id = vcpu_id;
  278. __entry->new = new;
  279. __entry->old = old;
  280. ),
  281. TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
  282. __entry->vcpu_id,
  283. __entry->new,
  284. __entry->grow ? "grow" : "shrink",
  285. __entry->old)
  286. );
  287. #define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
  288. trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
  289. #define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
  290. trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
  291. TRACE_EVENT(kvm_dirty_ring_push,
  292. TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
  293. TP_ARGS(ring, slot, offset),
  294. TP_STRUCT__entry(
  295. __field(int, index)
  296. __field(u32, dirty_index)
  297. __field(u32, reset_index)
  298. __field(u32, slot)
  299. __field(u64, offset)
  300. ),
  301. TP_fast_assign(
  302. __entry->index = ring->index;
  303. __entry->dirty_index = ring->dirty_index;
  304. __entry->reset_index = ring->reset_index;
  305. __entry->slot = slot;
  306. __entry->offset = offset;
  307. ),
  308. TP_printk("ring %d: dirty 0x%x reset 0x%x "
  309. "slot %u offset 0x%llx (used %u)",
  310. __entry->index, __entry->dirty_index,
  311. __entry->reset_index, __entry->slot, __entry->offset,
  312. __entry->dirty_index - __entry->reset_index)
  313. );
  314. TRACE_EVENT(kvm_dirty_ring_reset,
  315. TP_PROTO(struct kvm_dirty_ring *ring),
  316. TP_ARGS(ring),
  317. TP_STRUCT__entry(
  318. __field(int, index)
  319. __field(u32, dirty_index)
  320. __field(u32, reset_index)
  321. ),
  322. TP_fast_assign(
  323. __entry->index = ring->index;
  324. __entry->dirty_index = ring->dirty_index;
  325. __entry->reset_index = ring->reset_index;
  326. ),
  327. TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
  328. __entry->index, __entry->dirty_index, __entry->reset_index,
  329. __entry->dirty_index - __entry->reset_index)
  330. );
  331. TRACE_EVENT(kvm_dirty_ring_exit,
  332. TP_PROTO(struct kvm_vcpu *vcpu),
  333. TP_ARGS(vcpu),
  334. TP_STRUCT__entry(
  335. __field(int, vcpu_id)
  336. ),
  337. TP_fast_assign(
  338. __entry->vcpu_id = vcpu->vcpu_id;
  339. ),
  340. TP_printk("vcpu %d", __entry->vcpu_id)
  341. );
  342. TRACE_EVENT(kvm_unmap_hva_range,
  343. TP_PROTO(unsigned long start, unsigned long end),
  344. TP_ARGS(start, end),
  345. TP_STRUCT__entry(
  346. __field( unsigned long, start )
  347. __field( unsigned long, end )
  348. ),
  349. TP_fast_assign(
  350. __entry->start = start;
  351. __entry->end = end;
  352. ),
  353. TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
  354. __entry->start, __entry->end)
  355. );
  356. TRACE_EVENT(kvm_set_spte_hva,
  357. TP_PROTO(unsigned long hva),
  358. TP_ARGS(hva),
  359. TP_STRUCT__entry(
  360. __field( unsigned long, hva )
  361. ),
  362. TP_fast_assign(
  363. __entry->hva = hva;
  364. ),
  365. TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
  366. );
  367. TRACE_EVENT(kvm_age_hva,
  368. TP_PROTO(unsigned long start, unsigned long end),
  369. TP_ARGS(start, end),
  370. TP_STRUCT__entry(
  371. __field( unsigned long, start )
  372. __field( unsigned long, end )
  373. ),
  374. TP_fast_assign(
  375. __entry->start = start;
  376. __entry->end = end;
  377. ),
  378. TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
  379. __entry->start, __entry->end)
  380. );
  381. TRACE_EVENT(kvm_test_age_hva,
  382. TP_PROTO(unsigned long hva),
  383. TP_ARGS(hva),
  384. TP_STRUCT__entry(
  385. __field( unsigned long, hva )
  386. ),
  387. TP_fast_assign(
  388. __entry->hva = hva;
  389. ),
  390. TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
  391. );
  392. #endif /* _TRACE_KVM_MAIN_H */
  393. /* This part must be outside protection */
  394. #include <trace/define_trace.h>