kasan_init.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

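/*
 * Report a fatal early-boot error. The regular console is not available
 * this early, so print via the SCLP early console and stop in disabled wait.
 */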
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

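/*
 * Simple bump-down allocator handing out 1 MB segments used to back the
 * shadow with large mappings; memory is carved from segment_pos downwards
 * and never freed. Running below segment_low is fatal.
 */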
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

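/*
 * Bump-down page allocator for early page tables and 4 KB shadow pages;
 * grows from pgalloc_pos towards pgalloc_low and panics when the two meet.
 */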
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

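/*
 * Allocate and initialize an s390 region/segment table (crst). Such tables
 * span multiple pages (CRST_ALLOC_ORDER) and every entry is preset to the
 * given "empty" value.
 */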
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

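/*
 * An s390 page table (_PAGE_TABLE_SIZE) is half a page, so each allocated
 * page is split into two page tables; the second half is stashed in
 * pte_leftover and handed out on the next call. All entries start out
 * as _PAGE_INVALID.
 */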
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

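/*
 * POPULATE_ONE2ONE:	 map virtual == physical (identity mapping)
 * POPULATE_MAP:	 back the range with freshly allocated zeroed pages
 * POPULATE_ZERO_SHADOW: map everything read-only to the shared
 *			 kasan_early_shadow_page
 * POPULATE_SHALLOW:	 only create top-level entries; lower levels are
 *			 populated later on demand (KASAN_VMALLOC)
 */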
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

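/*
 * Walk and populate the page tables for [address, end). Depending on the
 * mode, shared zero-shadow tables or 1 MB segment mappings (with EDAT)
 * are used where alignment and remaining size allow; otherwise 4 KB pages
 * are mapped individually.
 */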
static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!has_nx || mode == POPULATE_ONE2ONE) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	/*
	 * The first 1MB of 1:1 mapping is mapped with 4KB pages
	 */
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

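/*
 * Install the new top-level table: build the ASCE and load it into
 * control registers 1, 7 and 13 (primary, secondary and home space).
 */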
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

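/*
 * Turn on dynamic address translation by setting the DAT bit and
 * home-space mode in the PSW mask.
 */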
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

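/*
 * Facility 8 is EDAT1 (1 MB segment mappings), facility 130 is the
 * instruction-execution-protection facility used for NX; the matching
 * control register 0 bits are set when the facility is available.
 */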
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

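/*
 * Build the early KASAN environment: detect facilities, size memory,
 * carve the downward allocators out of the top of memory, populate the
 * shadow and identity mappings in early_pg_dir, load the ASCE, enable
 * DAT and finally enable KASAN reporting by clearing init_task.kasan_depth.
 */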
void __init kasan_early_init(void)
{
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long memsize;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/*
	 * Kasan currently supports standby memory but only if it follows
	 * online memory (default allocation), i.e. no memory holes.
	 * - memsize represents end of online memory
	 * - ident_map_size represents online + standby and memory limits
	 *   accounted.
	 * Kasan maps "memsize" right away.
	 * [0, memsize]			- as identity mapping
	 * [__sha(0), __sha(memsize)]	- shadow memory for identity mapping
	 * The rest [memsize, ident_map_size] if memsize < ident_map_size
	 * could be mapped/unmapped dynamically later during memory hotplug.
	 */
	memsize = min(memsize, ident_map_size);

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
	crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |                |
	 * |                 |/    |     kasan      |
	 * +- shadow start --+     |     zero       |
	 * | 1/8 addr space  |     |     page       |
	 * +- shadow end ----+     |    mapping     |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area --+ \   |                |
	 * | vmalloc_size    |  \  |                |
	 * +- modules vaddr -+   \ +----------------+
	 * | 2Gb             |    \|    unmapped    | allocated per module
	 * +-----------------+     +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |     kasan      |
	 * |                 |/    |     zero       |
	 * +- shadow start --+     |     page       |
	 * | 1/8 addr space  |     |    mapping     |
	 * +- shadow end ----+     |  (untracked)   |
	 * | ... gap ...     |\    |                |
	 * +- vmalloc area --+ \   +- vmalloc area -+
	 * | vmalloc_size    |  \  |shallow populate|
	 * +- modules vaddr -+   \ +- modules area -+
	 * | 2Gb             |    \|shallow populate|
	 * +-----------------+     +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size),
				     IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
					     __sha(VMALLOC_START) :
					     __sha(MODULES_VADDR),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow_mapping(void)
{
	/*
	 * At this point we are still running on early pages setup early_pg_dir,
	 * while swapper_pg_dir has just been initialized with identity mapping.
	 * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	memcpy(p4_dir_dst, p4_dir_src,
	       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}

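/*
 * Return the memory that backed the early identity-mapping page tables
 * (from pgalloc_pos up to pgalloc_freeable) to memblock once the final
 * page tables are in place.
 */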
void __init kasan_free_early_identity(void)
{
	memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}