setup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/swiotlb.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>

#define SMBIOS_BIOSSIZE_OFFSET		0x09
#define SMBIOS_BIOSEXTERN_OFFSET	0x13
#define SMBIOS_FREQLOW_OFFSET		0x16
#define SMBIOS_FREQHIGH_OFFSET		0x17
#define SMBIOS_FREQLOW_MASK		0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET	0x23
#define LOONGSON_EFI_ENABLE		(1 << 3)

struct screen_info screen_info __section(".data");

unsigned long fw_arg0, fw_arg1, fw_arg2;

DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);

struct loongson_board_info b_info;
static const char dmi_empty_string[] = " ";

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
static int num_standard_resources;
static struct resource *standard_resources;

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource  = { .name = "Kernel bss", };

const char *get_system_type(void)
{
	return "generic-loongson-machine";
}

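/*
 * Return the s'th string from the unformatted (string-set) area that
 * follows the formatted part of a DMI structure. An index of 0 or a
 * missing string yields ""; a string matching the shared blank string
 * collapses to dmi_empty_string.
 */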
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
	const u8 *bp = ((u8 *) dm) + dm->length;

	if (s) {
		s--;
		while (s > 0 && *bp) {
			bp += strlen(bp) + 1;
			s--;
		}

		if (*bp != 0) {
			size_t len = strlen(bp) + 1;
			size_t cmp_len = len > 8 ? 8 : len;

			if (!memcmp(bp, dmi_empty_string, cmp_len))
				return dmi_empty_string;

			return bp;
		}
	}

	return "";
}

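/*
 * SMBIOS Type 4 (Processor Information): assemble the CPU clock in MHz
 * from the low/high speed bytes, then record the processor name string
 * and the number of cores per physical package.
 */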
static void __init parse_cpu_table(const struct dmi_header *dm)
{
	long freq_temp = 0;
	char *dmi_data = (char *)dm;

	freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
			((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
	cpu_clock_freq = freq_temp * 1000000;

	loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);

	pr_info("CpuClock = %llu\n", cpu_clock_freq);
}

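/*
 * SMBIOS Type 0 (BIOS Information): the ROM size byte encodes
 * 64 KiB * (n + 1), so (n + 1) << 6 gives the BIOS size in KiB.
 */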
static void __init parse_bios_table(const struct dmi_header *dm)
{
	char *dmi_data = (char *)dm;

	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
}

static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
	switch (dm->type) {
	case 0x0: /* BIOS Information */
		parse_bios_table(dm);
		break;
	case 0x4: /* Processor Information */
		parse_cpu_table(dm);
		break;
	}
}

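/*
 * Cache the firmware/board identification strings from DMI, then walk
 * the SMBIOS tables for the BIOS and processor records handled above.
 */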
static void __init smbios_parse(void)
{
	b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
	b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
	b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
	b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
	b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
	dmi_walk(find_tokens, NULL);
}

static int usermem __initdata;

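/*
 * "mem=size@start": the first use discards the firmware-provided memory
 * map, after which each instance adds one region (to the matching NUMA
 * node when CONFIG_NUMA is enabled).
 */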
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);
	else {
		pr_err("Invalid format!\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_NUMA))
		memblock_add(start, size);
	else
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);

	return 0;
}
early_param("mem", early_parse_mem);

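/*
 * In a kdump kernel, reserve the memory holding the elf core header so
 * /proc/vmcore can expose the crashed kernel's image; if the header size
 * was not passed in, infer it from the containing memory range.
 */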
static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	u64 i;
	phys_addr_t start, end;

	if (!is_kdump_kernel())
		return;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}

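/*
 * Reserve the "crashkernel=size@offset" region for a kexec crash kernel;
 * the reservation must land exactly at the requested base or it is
 * abandoned with a warning.
 */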
static void __init arch_parse_crashkernel(void)
{
#ifdef CONFIG_KEXEC
	int ret;
	unsigned long long start;
	unsigned long long total_mem;
	unsigned long long crash_base, crash_size;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret < 0 || crash_size <= 0)
		return;

	start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size);
	if (start != crash_base) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
#endif
}

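/*
 * Early platform bring-up: reserve vmcore/crashkernel memory, initialize
 * the ACPI tables and NUMA topology, then read the SMBIOS data and set
 * up the EFI runtime services.
 */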
void __init platform_init(void)
{
	arch_reserve_vmcore();
	arch_parse_crashkernel();

#ifdef CONFIG_ACPI_TABLE_UPGRADE
	acpi_table_upgrade();
#endif
#ifdef CONFIG_ACPI
	acpi_gbl_use_default_register_widths = false;
	acpi_boot_table_init();
#endif
#ifdef CONFIG_NUMA
	init_numa_memory();
#endif

	dmi_setup();
	smbios_parse();
	pr_info("The BIOS Version: %s\n", b_info.bios_version);

	efi_runtime_init();
}

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

/*
 * arch_mem_init - initialize memory management subsystem
 */
static void __init arch_mem_init(char **cmdline_p)
{
	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	/*
	 * To reduce the chance of a kernel panic when the IO TLB allocation
	 * fails under CONFIG_SWIOTLB, keep low-memory consumption as small
	 * as possible before swiotlb_init(), so make sparse_init() use
	 * top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	swiotlb_init(true, SWIOTLB_VERBOSE);

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

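/*
 * Register the kernel image sections and every memblock region with the
 * iomem resource tree, plus the crashkernel region when one was reserved.
 */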
static void __init resource_init(void)
{
	long i = 0;
	size_t res_size;
	struct resource *res;
	struct memblock_region *region;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (!memblock_is_nomap(region)) {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end   = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		} else {
			res->name  = "Reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end   = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
	}

#ifdef CONFIG_KEXEC
	if (crashk_res.start < crashk_res.end) {
		insert_resource(&iomem_resource, &crashk_res);
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
	}
#endif
}

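/*
 * Split "Reserved" child resources out of each System RAM resource for
 * the ranges memblock still has reserved, so they show up in /proc/iomem.
 */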
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

#ifdef CONFIG_SMP
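/*
 * Mark the first num_processors + disabled_cpus CPU ids (capped at
 * nr_cpu_ids) as possible, so disabled processors can be hot-added later.
 */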
static void __init prefill_possible_map(void)
{
	int i, possible;

	possible = num_processors + disabled_cpus;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
			possible, max((possible - num_processors), 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	set_nr_cpu_ids(possible);
}
#endif

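/*
 * The arch entry point for boot-time setup: probe the CPU, take over the
 * firmware-provided command line and memory map, then bring up the early
 * memory, platform, resource and paging state in order.
 */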
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	*cmdline_p = boot_command_line;

	init_environ();
	efi_init();
	memblock_init();
	pagetable_init();
	parse_early_param();
	reserve_initrd_mem();

	platform_init();
	arch_mem_init(cmdline_p);

	resource_init();
#ifdef CONFIG_SMP
	plat_smp_setup();
	prefill_possible_map();
#endif

	paging_init();
}