// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);
u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
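
/*
 * Print an error message via the early SCLP console and stop in a
 * disabled wait; there is no way to recover at this point.
 */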
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");
	disabled_wait();
}
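
/*
 * Initialize the LPP lowcore field and, if the load-program-parameter
 * facility (facility bit 40) is installed, load it into the CPU so that
 * early samples and traces can be attributed to the kernel.
 */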
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}
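
/*
 * With CONFIG_KERNEL_UNCOMPRESSED there is no decompressor to provide
 * mem_safe_offset(), so define it here: everything above the loaded
 * image (text/data plus BSS) is safe to overwrite.
 */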
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
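
/*
 * Make sure the initrd cannot be clobbered: if it starts below the first
 * safe address, move it there, and return the first address past
 * everything that must be preserved.
 */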
static unsigned long rescue_initrd(unsigned long safe_addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return safe_addr;
	if (!initrd_data.start || !initrd_data.size)
		return safe_addr;
	if (initrd_data.start < safe_addr) {
		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
		initrd_data.start = safe_addr;
	}
	return initrd_data.start + initrd_data.size;
}
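
/*
 * Copy the .boot.data and .boot.preserved.data sections, which the boot
 * code filled in, into the decompressed kernel image.
 */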
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
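
/*
 * Walk the .rela.dyn relocation records of the decompressed image and
 * apply them, adding the KASLR offset, so the kernel can run at its
 * (possibly randomized) load address.
 */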
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but may also include standby
 * (offline) memory. Due to the limiting factors below, "ident_map_size" can
 * be lower than the standby or even the online memory actually present. We
 * must never go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
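/*
 * Hypothetical example: 16 GiB of online plus standby memory and "mem=4G"
 * on the command line, with no kdump or dump limits active, yield
 * ident_map_size = min(16 GiB, 4 GiB, 1UL << MAX_PHYSMEM_BITS) = 4 GiB.
 */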
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}
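
/*
 * Lay out the kernel virtual address space. Working down from the highest
 * usable address (vmax): a page for the memcpy_real area, the absolute
 * lowcore mapping, the modules area, the vmalloc area, the vmemmap array,
 * and below all of that the 1:1 identity mapping of physical memory.
 */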
static void setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
		vmax = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		vmax = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force the modules and vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(vmax);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* make sure the identity map does not overlap the vmemmap array */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure the vmemmap array does not overlap the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel
 * and NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
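/*
 * Hypothetical example: with 64 GiB of potential physical memory the
 * computed size is 8 GiB, and the final vmalloc_size is the maximum of
 * that and the previously set default.
 */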
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}
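
/*
 * Apply the KASLR offset to all addresses recorded in the vmlinux info
 * block, so that later steps operate on the relocated image.
 */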
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
}
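
/*
 * Reserve room at safe_addr for the .amode31 section (code and data that
 * must stay addressable below 2 GB) and bump safe_addr past it.
 */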
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	return safe_addr + vmlinux.amode31_size;
}
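
/*
 * Main C entry point of the early boot phase: gather boot data, decide on
 * the memory layout, optionally apply KASLR, place the kernel image, and
 * jump to its entry point.
 */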
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	safe_addr = rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	sanitize_prot_virt_host();
	max_physmem_end = detect_memory(&safe_addr);
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	setup_kernel_memory_layout();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}
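
	/*
	 * Move the kernel image to its (possibly KASLR-adjusted) link
	 * address. For an uncompressed kernel, img still points at the
	 * pre-randomization location captured above.
	 */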
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save the KASLR offset for early dumps, before vmcore_info
		 * is set. Mark it as odd to distinguish it from a real
		 * vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear the non-relocated kernel copy */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}