kasan_init.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower pud/p4d levels
 *
 * In addition, when shallow populating a kasan region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to
 * the pud level too.
 */

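/*
 * For orientation: the generic KASAN shadow translation that these page
 * tables back is
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * i.e. one shadow byte per 2^KASAN_SHADOW_SCALE_SHIFT (usually 8) bytes of
 * memory. kasan_mem_to_shadow() in <linux/kasan.h> computes exactly this,
 * and kasan_early_init() below asserts at build time that
 * KASAN_SHADOW_OFFSET is consistent with KASAN_SHADOW_END and the shift.
 */
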
extern pgd_t early_pg_dir[PTRS_PER_PGD];

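/*
 * Populate the PTE level of the shadow mapping for [vaddr, end): reuse the
 * PTE table pointed to by the PMD entry if there is one, otherwise allocate
 * a fresh table, back every still-empty slot with a newly allocated shadow
 * page, then (re)write the PMD entry so it points at the table.
 */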
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

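/*
 * Populate the PMD level for [vaddr, end): map PMD-sized, PMD-aligned chunks
 * with huge pages when a contiguous allocation succeeds, and fall back to
 * kasan_populate_pte() for everything else.
 */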
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting the PUD
	 * entry in the page table, otherwise, if we did set it before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

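/*
 * Populate the PUD level for [vaddr, end). In the early case, PUD-sized
 * holes are pointed at the shared kasan_early_shadow_pmd table; in the late
 * case, PUD-sized huge mappings are attempted before falling back to
 * kasan_populate_pmd().
 */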
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address whereas it is not mapped yet: when populating
		 * early_pg_dir we need the physical address, and when populating
		 * swapper_pg_dir we need the kernel virtual address, so use the
		 * pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		memcpy(base_pud, (void *)kasan_early_shadow_pud,
		       sizeof(pud_t) * PTRS_PER_PUD);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before setting the PGD
	 * entry in the page table, otherwise, if we did set it before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

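/*
 * Same as kasan_populate_pud() but one level up: only reached when sv57 is
 * in effect and the PGD entries point to P4D tables.
 */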
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address whereas it is not mapped yet: when populating
		 * early_pg_dir we need the physical address, and when populating
		 * swapper_pg_dir we need the kernel virtual address, so use the
		 * pt_ops facility.
		 */
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
			base_p4d = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
			       sizeof(p4d_t) * PTRS_PER_P4D);
		}
	}

	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
				if (phys_addr) {
					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
	} while (p4dp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole P4D table to be populated before setting the PGD
	 * entry in the page table, otherwise, if we did set it before
	 * populating the table entirely, memblock could allocate a page at a
	 * physical address where KASAN is not populated yet and then we'd get
	 * a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}

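/*
 * The level right below the PGD depends on the paging mode discovered at
 * boot: p4d for sv57, pud for sv48, pmd for sv39. These helpers pick the
 * matching early shadow table and population routine.
 */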
#define kasan_early_shadow_pgd_next	(pgtable_l5_enabled ?		\
			(uintptr_t)kasan_early_shadow_p4d :		\
			(pgtable_l4_enabled ?				\
				(uintptr_t)kasan_early_shadow_pud :	\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)		\
		(pgtable_l5_enabled ?					\
		kasan_populate_p4d(pgdp, vaddr, next, early) :		\
		(pgtable_l4_enabled ?					\
			kasan_populate_pud(pgdp, vaddr, next, early) :	\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))

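/*
 * Populate the PGD level for [vaddr, end): PGDIR-sized chunks are either
 * pointed at the shared early shadow tables (early case) or mapped with a
 * PGDIR-sized huge allocation when one can be grabbed; anything else is
 * handed down to the next level.
 */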
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init()
				 * initialized the whole KASAN shadow region with
				 * kasan_early_shadow_pud: if this is still the
				 * case, that means we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

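/*
 * Build the early shadow mapping: every level of the early shadow tables
 * funnels down to the single zeroed kasan_early_shadow_page, so the whole
 * shadow region reads back as unpoisoned until real shadow memory is
 * allocated in kasan_init().
 */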
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

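/*
 * Replay the early shadow population into swapper_pg_dir once it takes over
 * from early_pg_dir; the pt_ops facility lets the same code serve both.
 */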
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

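/*
 * Fully populate the shadow of [start, end) with real memory, then clear it
 * so the covered region starts out unpoisoned.
 */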
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

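/*
 * Shallow population, used for the vmalloc shadow: only the intermediate
 * page tables are installed, entries still pointing at the shared early
 * shadow tables are replaced with private copies, and the leaf level is left
 * to be filled on demand at vmalloc time (see CONFIG_KASAN_VMALLOC). The
 * *_shallow_* helpers below do this one level at a time.
 */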
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (!is_kasan_pmd)
			continue;

		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		if (!is_kasan_pud)
			continue;

		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

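/*
 * Dispatch to the level right below the PGD, as above, but for the shallow
 * variants.
 */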
#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)		\
		(pgtable_l5_enabled ?					\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :		\
		(pgtable_l4_enabled ?					\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :		\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		/*
		 * Only copy the early shadow entries into the table we just
		 * allocated: if the entry already pointed to a private table,
		 * there is nothing to copy (and 'p' would be stale).
		 */
		if (is_kasan_pgd_next)
			memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
}

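/*
 * Main KASAN setup: shallow-populate the vmalloc shadow (when
 * CONFIG_KASAN_VMALLOC is enabled), fully populate the shadow of the linear
 * mapping and of the kernel/BPF/modules region, then make the early shadow
 * page read-only and lift the kasan_depth gate so reports are generated.
 */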
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}