  1. // SPDX-License-Identifier: GPL-2.0-only
  2. //
  3. // Copyright (C) 2019 Jason Yan <[email protected]>
  4. #include <linux/kernel.h>
  5. #include <linux/errno.h>
  6. #include <linux/string.h>
  7. #include <linux/types.h>
  8. #include <linux/mm.h>
  9. #include <linux/swap.h>
  10. #include <linux/stddef.h>
  11. #include <linux/init.h>
  12. #include <linux/delay.h>
  13. #include <linux/memblock.h>
  14. #include <linux/libfdt.h>
  15. #include <linux/crash_core.h>
  16. #include <linux/of.h>
  17. #include <linux/of_fdt.h>
  18. #include <asm/cacheflush.h>
  19. #include <asm/kdump.h>
  20. #include <mm/mmu_decl.h>
  21. #include <generated/utsrelease.h>
/*
 * Memory ranges the randomized kernel must avoid, collected from the
 * device tree and command line before an offset is chosen.
 */
struct regions {
	unsigned long pa_start;		/* start of usable linear range */
	unsigned long pa_end;		/* end of usable linear range */
	unsigned long kernel_size;	/* size of the kernel image (_stext.._end) */
	unsigned long dtb_start;	/* physical range of the FDT blob */
	unsigned long dtb_end;
	unsigned long initrd_start;	/* initrd range from /chosen, if any */
	unsigned long initrd_end;
	unsigned long crash_start;	/* crashkernel reservation, if any */
	unsigned long crash_end;
	int reserved_mem;		/* FDT offset of /reserved-memory, or < 0 if absent */
	int reserved_mem_addr_cells;	/* #address-cells of /reserved-memory */
	int reserved_mem_size_cells;	/* #size-cells of /reserved-memory */
};

struct regions __initdata regions;
/* Populate boot_command_line from the FDT's /chosen node. */
static __init void kaslr_get_cmdline(void *fdt)
{
	early_init_dt_scan_chosen(boot_command_line);
}
  41. static unsigned long __init rotate_xor(unsigned long hash, const void *area,
  42. size_t size)
  43. {
  44. size_t i;
  45. const unsigned long *ptr = area;
  46. for (i = 0; i < size / sizeof(hash); i++) {
  47. /* Rotate by odd number of bits and XOR. */
  48. hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
  49. hash ^= ptr[i];
  50. }
  51. return hash;
  52. }
/* Attempt to create a simple starting entropy. This can make it different for
 * every build but it is still not enough. Stronger entropy should
 * be added to make it change for every boot.
 */
/*
 * Build a starting hash from the build-specific linux_banner string and
 * the raw contents of the FDT.
 */
static unsigned long __init get_boot_seed(void *fdt)
{
	unsigned long hash = 0;

	/* build-specific string for starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));

	/* Mix in the whole device tree blob. */
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

	return hash;
}
/*
 * Read the /chosen/kaslr-seed property from the FDT and wipe it so the
 * seed does not survive into the running system. Returns 0 when the
 * property is absent or not a u64.
 */
static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	fdt64_t *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;	/* wipe the seed from the FDT */
	return ret;
}
  80. static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
  81. {
  82. return e1 >= s2 && e2 >= s1;
  83. }
/*
 * Check whether [start, end] overlaps any region reserved in the FDT:
 * a /memreserve/ entry or a static child of /reserved-memory.
 */
static __init bool overlaps_reserved_region(const void *fdt, u32 start,
					    u32 end)
{
	int subnode, len, i;
	u64 base, size;

	/* check for overlap with /memreserve/ entries */
	for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
		if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
			continue;
		if (regions_overlap(start, end, base, base + size))
			return true;
	}

	/* reserved_mem < 0 means no /reserved-memory node was found */
	if (regions.reserved_mem < 0)
		return false;

	/* check for overlap with static reservations in /reserved-memory */
	for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
	     subnode >= 0;
	     subnode = fdt_next_subnode(fdt, subnode)) {
		const fdt32_t *reg;
		u64 rsv_end;

		len = 0;
		reg = fdt_getprop(fdt, subnode, "reg", &len);
		/*
		 * Walk each (address, size) pair in the "reg" property.
		 * NOTE(review): 'len' is in bytes but is compared against a
		 * cell count; this matches the upstream code and the loop
		 * still terminates since 'len' strictly decreases each pass.
		 */
		while (len >= (regions.reserved_mem_addr_cells +
			       regions.reserved_mem_size_cells)) {
			base = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_addr_cells == 2)
				base = (base << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_addr_cells;
			len -= 4 * regions.reserved_mem_addr_cells;

			size = fdt32_to_cpu(reg[0]);
			if (regions.reserved_mem_size_cells == 2)
				size = (size << 32) | fdt32_to_cpu(reg[1]);

			reg += regions.reserved_mem_size_cells;
			len -= 4 * regions.reserved_mem_size_cells;

			/* skip reservations entirely above the usable range */
			if (base >= regions.pa_end)
				continue;

			/* clamp so the 32-bit overlap test below cannot wrap */
			rsv_end = min(base + size, (u64)U32_MAX);

			if (regions_overlap(start, end, base, rsv_end))
				return true;
		}
	}
	return false;
}
/*
 * Check whether [start, end] collides with anything the new kernel must
 * not overwrite: the current kernel image, the FDT, the initrd, the
 * crashkernel reservation, or any FDT-reserved region.
 */
static __init bool overlaps_region(const void *fdt, u32 start,
				   u32 end)
{
	if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
		return true;

	if (regions_overlap(start, end, regions.dtb_start,
			    regions.dtb_end))
		return true;

	if (regions_overlap(start, end, regions.initrd_start,
			    regions.initrd_end))
		return true;

	if (regions_overlap(start, end, regions.crash_start,
			    regions.crash_end))
		return true;

	return overlaps_reserved_region(fdt, start, end);
}
/*
 * Parse the crashkernel= option from the command line and record its
 * range in regions so the randomized kernel stays clear of it.
 * No-op when CONFIG_CRASH_CORE is not set.
 */
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
#ifdef CONFIG_CRASH_CORE
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, size, &crash_size,
				&crash_base);
	if (ret != 0 || crash_size == 0)
		return;
	/* no explicit base requested: fall back to the default kdump base */
	if (crash_base == 0)
		crash_base = KDUMP_KERNELBASE;

	regions.crash_start = (unsigned long)crash_base;
	regions.crash_end = (unsigned long)(crash_base + crash_size);

	pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
#endif
}
/*
 * Record the initrd range from /chosen (linux,initrd-start/-end) in
 * regions. Leaves regions untouched when either property is absent.
 */
static void __init get_initrd_range(void *fdt)
{
	u64 start, end;
	int node, len;
	const __be32 *prop;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;

	prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
	if (!prop)
		return;
	/* len / 4 = number of 32-bit cells making up the value */
	start = of_read_number(prop, len / 4);

	prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len / 4);

	regions.initrd_start = (unsigned long)start;
	regions.initrd_end = (unsigned long)end;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}
/*
 * Search downward from 'offset' toward 'start' in SZ_16K steps for the
 * first physical address where a kernel of regions.kernel_size does not
 * overlap any forbidden region. Returns 0 when no slot is found.
 */
static __init unsigned long get_usable_address(const void *fdt,
					       unsigned long start,
					       unsigned long offset)
{
	unsigned long pa;
	unsigned long pa_end;

	/* signed compare so the loop also stops if pa underflows past start */
	for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
		pa_end = pa + regions.kernel_size;
		if (overlaps_region(fdt, pa, pa_end))
			continue;

		return pa;
	}
	return 0;
}
/*
 * Read #address-cells and #size-cells from 'node', defaulting both to 1
 * when a property is missing or malformed.
 */
static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
				  int *size_cells)
{
	const int *prop;
	int len;

	/*
	 * Retrieve the #address-cells and #size-cells properties
	 * from the 'node', or use the default if not provided.
	 */
	*addr_cells = *size_cells = 1;

	/*
	 * NOTE(review): when the property is absent, fdt_getprop() returns
	 * NULL and stores a negative error code in 'len', so checking
	 * len == 4 alone guards the dereference below.
	 */
	prop = fdt_getprop(fdt, node, "#address-cells", &len);
	if (len == 4)
		*addr_cells = fdt32_to_cpu(*prop);
	prop = fdt_getprop(fdt, node, "#size-cells", &len);
	if (len == 4)
		*size_cells = fdt32_to_cpu(*prop);
}
/*
 * Starting at 64M block 'index' with candidate 'offset' inside it, search
 * downward through the lower 64M blocks for a usable kernel location.
 * Returns the chosen offset relative to memstart_addr, or 0 if none fits.
 */
static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
					       unsigned long offset)
{
	unsigned long koffset = 0;
	unsigned long start;

	/* signed compare: loop ends after index wraps below zero */
	while ((long)index >= 0) {
		/*
		 * NOTE(review): 'offset' is rebased into an absolute address
		 * on the first pass and carried into later passes unchanged
		 * relative form — this matches the upstream code.
		 */
		offset = memstart_addr + index * SZ_64M + offset;
		start = memstart_addr + index * SZ_64M;
		koffset = get_usable_address(dt_ptr, start, offset);
		if (koffset)
			break;
		index--;
	}

	/* convert the absolute address back into an offset from memstart */
	if (koffset != 0)
		koffset -= memstart_addr;

	return koffset;
}
  227. static inline __init bool kaslr_disabled(void)
  228. {
  229. return strstr(boot_command_line, "nokaslr") != NULL;
  230. }
/*
 * Gather entropy, record all forbidden regions, and pick a random 16K-
 * aligned offset (relative to memstart_addr) for the kernel within the
 * low linear mapping. Returns 0 when randomization is disabled or not
 * possible.
 */
static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
						  unsigned long kernel_sz)
{
	unsigned long offset, random;
	unsigned long ram, linear_sz;
	u64 seed;
	unsigned long index;

	kaslr_get_cmdline(dt_ptr);
	if (kaslr_disabled())
		return 0;

	random = get_boot_seed(dt_ptr);

	/* mix in the current timebase value for per-boot variation */
	seed = get_tb() << 32;
	seed ^= get_tb();
	random = rotate_xor(random, &seed, sizeof(seed));

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(dt_ptr);
	if (seed)
		random = rotate_xor(random, &seed, sizeof(seed));
	else
		pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

	ram = min_t(phys_addr_t, __max_low_memory, size);
	ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
	linear_sz = min_t(unsigned long, ram, SZ_512M);

	/* If the linear size is smaller than 64M, do not randomize */
	if (linear_sz < SZ_64M)
		return 0;

	/* check for a reserved-memory node and record its cell sizes */
	regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
	if (regions.reserved_mem >= 0)
		get_cell_sizes(dt_ptr, regions.reserved_mem,
			       &regions.reserved_mem_addr_cells,
			       &regions.reserved_mem_size_cells);

	/* record the ranges the new kernel must not overlap */
	regions.pa_start = memstart_addr;
	regions.pa_end = memstart_addr + linear_sz;
	regions.dtb_start = __pa(dt_ptr);
	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
	regions.kernel_size = kernel_sz;

	get_initrd_range(dt_ptr);
	get_crash_kernel(dt_ptr, ram);

	/*
	 * Decide which 64M we want to start
	 * Only use the low 8 bits of the random seed
	 */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide offset inside 64M */
	offset = random % (SZ_64M - kernel_sz);
	offset = round_down(offset, SZ_16K);

	return kaslr_legal_offset(dt_ptr, index, offset);
}
/*
 * To see if we need to relocate the kernel to a random offset, and if so,
 * copy the image there, set up a covering TLB entry when needed, and jump
 * into the relocated kernel.
 * void *dt_ptr - address of the device tree
 * phys_addr_t size - size of the first memory block
 */
notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
{
	unsigned long tlb_virt;
	phys_addr_t tlb_phys;
	unsigned long offset;
	unsigned long kernel_sz;

	kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
	/* offset == 0 means no randomization: keep running where we are */
	if (offset == 0)
		return;

	kernstart_virt_addr += offset;
	kernstart_addr += offset;

	is_second_reloc = 1;

	/* offsets of 64M or more land outside the current 64M mapping */
	if (offset >= SZ_64M) {
		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
		tlb_phys = round_down(kernstart_addr, SZ_64M);

		/* Create kernel map to relocate in */
		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
	}

	/* Copy the kernel to its new location and run */
	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
}
/*
 * After relocation, zero out the stale kernel image left at the original
 * link address so no second copy of the kernel text/data remains.
 */
void __init kaslr_late_init(void)
{
	/* If randomized, clear the original kernel */
	if (kernstart_virt_addr != KERNELBASE) {
		unsigned long kernel_sz;

		kernel_sz = (unsigned long)_end - kernstart_virt_addr;
		/* memzero_explicit so the clear cannot be optimized away */
		memzero_explicit((void *)KERNELBASE, kernel_sz);
	}
}