ptdump.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ptdump.h>

#include <asm/ptdump.h>

#include <linux/pgtable.h>
#include <asm/kasan.h>
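
/*
 * Wrappers that only print when a seq_file is attached: ptdump_check_wx()
 * walks the tables with st->seq == NULL and relies on these being no-ops.
 */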
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({					\
	if (m)				\
		seq_printf(m, fmt);	\
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	unsigned long last_pa;
	int level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
};

/* Address marker */
struct addr_marker {
	unsigned long start_address;
	const char *name;
};

/* Private information for debugfs */
struct ptd_mm_info {
	struct mm_struct *mm;
	const struct addr_marker *markers;
	unsigned long base_addr;
	unsigned long end;
};

enum address_markers_idx {
	FIXMAP_START_NR,
	FIXMAP_END_NR,
	PCI_IO_START_NR,
	PCI_IO_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	VMEMMAP_START_NR,
	VMEMMAP_END_NR,
#endif
	VMALLOC_START_NR,
	VMALLOC_END_NR,
	PAGE_OFFSET_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
#ifdef CONFIG_64BIT
	MODULES_MAPPING_NR,
	KERNEL_MAPPING_NR,
#endif
	END_OF_SPACE_NR
};

static struct addr_marker address_markers[] = {
	{0, "Fixmap start"},
	{0, "Fixmap end"},
	{0, "PCI I/O start"},
	{0, "PCI I/O end"},
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	{0, "vmemmap start"},
	{0, "vmemmap end"},
#endif
	{0, "vmalloc() area"},
	{0, "vmalloc() end"},
	{0, "Linear mapping"},
#ifdef CONFIG_KASAN
	{0, "Kasan shadow start"},
	{0, "Kasan shadow end"},
#endif
#ifdef CONFIG_64BIT
	{0, "Modules/BPF mapping"},
	{0, "Kernel mapping"},
#endif
	{-1, NULL},
};

static struct ptd_mm_info kernel_ptd_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
	.end = ULONG_MAX,
};

#ifdef CONFIG_EFI
static struct addr_marker efi_addr_markers[] = {
	{ 0, "UEFI runtime start" },
	{ SZ_1G, "UEFI runtime end" },
	{ -1, NULL }
};

static struct ptd_mm_info efi_ptd_info = {
	.mm = &efi_mm,
	.markers = efi_addr_markers,
	.base_addr = 0,
	.end = SZ_2G,
};
#endif

/* Page Table Entry */
struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
};
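
/*
 * One descriptor per RISC-V PTE attribute bit, in the order the legend is
 * printed by dump_prot(): software bits, Dirty, Accessed, Global, User,
 * eXecute, Write, Read, Valid.
 */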
static const struct prot_bits pte_bits[] = {
	{
		.mask = _PAGE_SOFT,
		.val = _PAGE_SOFT,
		.set = "RSW",
		.clear = " ",
	}, {
		.mask = _PAGE_DIRTY,
		.val = _PAGE_DIRTY,
		.set = "D",
		.clear = ".",
	}, {
		.mask = _PAGE_ACCESSED,
		.val = _PAGE_ACCESSED,
		.set = "A",
		.clear = ".",
	}, {
		.mask = _PAGE_GLOBAL,
		.val = _PAGE_GLOBAL,
		.set = "G",
		.clear = ".",
	}, {
		.mask = _PAGE_USER,
		.val = _PAGE_USER,
		.set = "U",
		.clear = ".",
	}, {
		.mask = _PAGE_EXEC,
		.val = _PAGE_EXEC,
		.set = "X",
		.clear = ".",
	}, {
		.mask = _PAGE_WRITE,
		.val = _PAGE_WRITE,
		.set = "W",
		.clear = ".",
	}, {
		.mask = _PAGE_READ,
		.val = _PAGE_READ,
		.set = "R",
		.clear = ".",
	}, {
		.mask = _PAGE_PRESENT,
		.val = _PAGE_PRESENT,
		.set = "V",
		.clear = ".",
	}
};

/* Page Level */
struct pg_level {
	const char *name;
	u64 mask;
};
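
/*
 * Names for each level of the walk; levels folded away by the current
 * CONFIG_PGTABLE_LEVELS fall back to "PGD". ptdump_init() overrides the
 * P4D/PUD names at runtime from pgtable_l5_enabled/pgtable_l4_enabled and
 * fills in the per-level attribute masks.
 */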
static struct pg_level pg_level[] = {
	{ /* pgd */
		.name = "PGD",
	}, { /* p4d */
		.name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
	}, { /* pud */
		.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
	}, { /* pmd */
		.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
	}, { /* pte */
		.name = "PTE",
	},
};
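
/* Print one set/clear character per attribute bit of the current range. */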
static void dump_prot(struct pg_state *st)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
		const char *s;

		if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
			s = pte_bits[i].set;
		else
			s = pte_bits[i].clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

#ifdef CONFIG_64BIT
#define ADDR_FORMAT	"0x%016lx"
#else
#define ADDR_FORMAT	"0x%08lx"
#endif
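
/*
 * Print one summary line: the virtual range, its starting physical address,
 * the range size scaled to the largest fitting unit (K/M/G/...), and the
 * page table level name.
 */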
static void dump_addr(struct pg_state *st, unsigned long addr)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;

	pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT " ",
			   st->start_address, addr);

	pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);

	delta = (addr - st->start_address) >> 10;
	while (!(delta & 1023) && unit[1]) {
		delta >>= 10;
		unit++;
	}

	pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
			   pg_level[st->level].name);
}
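
/*
 * When invoked from ptdump_check_wx(), warn once about any range that is
 * both writable and executable and add its pages to the W+X count.
 */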
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;

	if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
	    (_PAGE_WRITE | _PAGE_EXEC))
		return;

	WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
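
/*
 * Callback invoked by the generic ptdump walker for every entry. Contiguous
 * entries with the same level and protection bits are coalesced into one
 * range; a line is emitted whenever the protection, level or address-marker
 * region changes.
 */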
static void note_page(struct ptdump_state *pt_st, unsigned long addr,
		      int level, u64 val)
{
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
	u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
	u64 prot = 0;

	if (level >= 0)
		prot = val & pg_level[level].mask;

	if (st->level == -1) {
		st->level = level;
		st->current_prot = prot;
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot ||
		   level != st->level || addr >= st->marker[1].start_address) {
		if (st->current_prot) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);
			dump_prot(st);
			pt_dump_seq_puts(st->seq, "\n");
		}

		while (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->current_prot = prot;
		st->level = level;
	} else {
		st->last_pa = pa;
	}
}
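
/* Walk one mm's page tables and dump them into the given seq_file. */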
static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
{
	struct pg_state st = {
		.seq = s,
		.marker = pinfo->markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{pinfo->base_addr, pinfo->end},
				{0, 0}
			}
		}
	};

	ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
}
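
/*
 * Scan the kernel address space for W+X mappings (the CONFIG_DEBUG_WX hook).
 * No seq_file is attached, so only the pass/fail summary is printed.
 */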
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{0, NULL},
			{-1, NULL},
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{KERN_VIRT_START, ULONG_MAX},
				{0, 0}
			}
		}
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages)
		pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
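
/* debugfs ->show() callback: the ptd_mm_info to dump is the file's private data. */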
static int ptdump_show(struct seq_file *m, void *v)
{
	ptdump_walk(m, m->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ptdump);
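
/*
 * Resolve the address markers and per-level masks at boot, then expose the
 * dumps through debugfs. With debugfs mounted at its usual location the
 * tables can be read with, e.g.:
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *   # cat /sys/kernel/debug/efi_page_tables   (EFI runtime services only)
 */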
static int __init ptdump_init(void)
{
	unsigned int i, j;

	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
	address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
	address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
#endif
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#ifdef CONFIG_64BIT
	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
#endif

	kernel_ptd_info.base_addr = KERN_VIRT_START;

	pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
	pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
			pg_level[i].mask |= pte_bits[j].mask;

	debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
			    &ptdump_fops);
#ifdef CONFIG_EFI
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
				    &ptdump_fops);
#endif

	return 0;
}

device_initcall(ptdump_init);