/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

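/*
 * Entry fields, assignment and pretty-printing shared by all tracepoints
 * that log a struct kvm_mmu_page (a shadow page).
 */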
#define KVM_MMU_PAGE_FIELDS		\
	__field(__u8, mmu_valid_gen)	\
	__field(__u64, gfn)		\
	__field(__u32, role)		\
	__field(__u32, root_count)	\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

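/*
 * Decode the packed role word into a human-readable string inside the
 * trace_seq buffer and return a pointer to its start.  The 0 printed
 * via the trailing %c NUL-terminates the string in the buffer.
 */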
#define KVM_MMU_PAGE_PRINTK() ({					\
	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
	static const char *access_str[] = {				\
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
	};								\
	union kvm_mmu_page_role role;					\
									\
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s"	\
			 " %snxe %sad root %u %s%c",			\
			 __entry->mmu_valid_gen,			\
			 __entry->gfn, role.level,			\
			 role.has_4_byte_gpte ? 4 : 8,			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.efer_nx ? "" : "!",			\
			 role.ad_disabled ? "!" : "",			\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	saved_ptr;							\
})

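/*
 * {mask, name} pairs consumed by __print_flags() to render an x86
 * page-fault error code, e.g. "P|W".
 */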
#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

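/*
 * Export the RET_PF_* values so userspace tools can resolve them when
 * parsing the trace format strings.
 */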
TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

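/*
 * Each TRACE_EVENT() in this file generates a trace_<name>() stub for
 * callers to invoke.  A minimal sketch of a call site (the variable
 * names here are illustrative, not taken from this header):
 *
 *	trace_kvm_mmu_pagetable_walk(addr, pferr);
 */
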
/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

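/*
 * Event class for tracepoints that record the setting of a bit in a
 * guest PTE; only the resulting guest physical address is logged.
 */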
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size)
);

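/* The guest page-table walk failed; log the resulting error code. */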
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

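/*
 * A shadow page was looked up; 'created' distinguishes a newly
 * allocated page from one found in the hash table.
 */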
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);

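/* Event class for tracepoints that log a single shadow page. */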
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp)
);

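/*
 * An MMIO SPTE was installed, caching the gfn, the access bits and the
 * MMIO generation in the SPTE itself.
 */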
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;
		__entry->gen = get_mmio_spte_generation(spte);
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);

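/* A page fault hit a cached MMIO SPTE and is handled as MMIO. */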
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);

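/*
 * The lock-free fast page-fault path ran; log the SPTE transition and
 * whether the fault was spurious or fixed in place.
 */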
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		 u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, fault, sptep, old_spte, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gpa_t, cr2_or_gpa)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->cr2_or_gpa = fault->addr;
		__entry->error_code = fault->error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->ret = ret;
	),

	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
	)
);

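/*
 * All shadow pages are being invalidated in bulk via the MMU valid
 * generation; log the generation and the number of pages still in use.
 */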
TRACE_EVENT(
	kvm_mmu_zap_all_fast,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(__u8, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
	),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
	),

	TP_printk("kvm-mmu-valid-gen %u used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);

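/*
 * A cached MMIO SPTE's generation was compared against the current MMIO
 * generation; 'valid' in the output is the result of that comparison.
 */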
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
	),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
	),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);

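/*
 * An SPTE was written.  The r/x/u permission bits are computed at
 * trace time because their encoding depends on the paging mode.
 */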
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(u64, sptep)
		__field(u8, level)
		/* These depend on page entry type, so compute them now. */
		__field(bool, r)
		__field(bool, x)
		__field(signed char, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);

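/*
 * The page-fault handler chose the page size to map; the pfn includes
 * the offset of the gfn within the chosen huge-page size.
 */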
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(struct kvm_page_fault *fault),
	TP_ARGS(fault),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = fault->gfn;
		__entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
		__entry->level = fault->goal_level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);

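/* A TDP MMU SPTE transitioned from old_spte to new_spte. */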
TRACE_EVENT(
	kvm_tdp_mmu_spte_changed,
	TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
	TP_ARGS(as_id, gfn, level, old_spte, new_spte),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, old_spte)
		__field(u64, new_spte)
		/* Level cannot be larger than 5 on x86, so it fits in a u8. */
		__field(u8, level)
		/* as_id can only be 0 or 1 on x86, so it fits in a u8. */
		__field(u8, as_id)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->old_spte = old_spte;
		__entry->new_spte = new_spte;
		__entry->level = level;
		__entry->as_id = as_id;
	),

	TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
		  __entry->as_id, __entry->gfn, __entry->level,
		  __entry->old_spte, __entry->new_spte
	)
);

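/*
 * A huge page was (or failed to be) split into smaller pages; a
 * non-zero errno records why the split failed.
 */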
TRACE_EVENT(
	kvm_mmu_split_huge_page,
	TP_PROTO(u64 gfn, u64 spte, int level, int errno),
	TP_ARGS(gfn, spte, level, errno),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(int, level)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = spte;
		__entry->level = level;
		__entry->errno = errno;
	),

	TP_printk("gfn %llx spte %llx level %d errno %d",
		  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>