// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/uaccess.h>
#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sparsemem.h>
#include <asm/platform_early.h>

/*
 * Initialize loops_per_jiffy as 10000000 (1000MIPS).
 * This value will be used at the very early stage of serial setup.
 * A larger value than necessary here is harmless.
 */
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
	[0] = {
		.type			= CPU_SH_NONE,
		.family			= CPU_FAMILY_UNKNOWN,
		.loops_per_jiffy	= 10000000,
		.phys_bits		= MAX_PHYSMEM_BITS,
	},
};
EXPORT_SYMBOL(cpu_data);

/*
 * The machine vector. First entry in .machvec.init, or clobbered by
 * sh_mv= on the command line, prior to .machvec.init teardown.
 */
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

extern int root_mountflags;
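
/* Decoding of the RAMDISK_FLAGS boot parameter. */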
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);

unsigned long memory_limit = 0;

static struct resource mem_resources[MAX_NUMNODES];

int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
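
/* "mem=" command line option: record an upper limit on usable memory. */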
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));

	pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_parse_mem);
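
/*
 * Validate the initrd range handed over by the boot loader and, if it
 * looks sane, reserve it in memblock and point the root device at it.
 */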
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start, end;

	/*
	 * Check for the rare cases where boot loaders adhere to the boot
	 * ABI.
	 */
	if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
		goto disable;

	start = INITRD_START + __MEMORY_START;
	end = start + INITRD_SIZE;

	if (unlikely(end <= start))
		goto disable;
	if (unlikely(start & ~PAGE_MASK)) {
		pr_err("initrd must be page aligned\n");
		goto disable;
	}

	if (unlikely(start < __MEMORY_START)) {
		pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
		       start, __MEMORY_START);
		goto disable;
	}

	if (unlikely(end > memblock_end_of_DRAM())) {
		pr_err("initrd extends beyond end of memory "
		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
		       end, (unsigned long)memblock_end_of_DRAM());
		goto disable;
	}

	/*
	 * If we got this far in spite of the boot loader's best efforts
	 * to the contrary, assume we actually have a valid initrd and
	 * fix up the root dev.
	 */
	ROOT_DEV = Root_RAM0;

	/*
	 * Address sanitization
	 */
	initrd_start = (unsigned long)__va(start);
	initrd_end = initrd_start + INITRD_SIZE;

	memblock_reserve(__pa(initrd_start), INITRD_SIZE);

	return;

disable:
	pr_info("initrd disabled\n");
	initrd_start = initrd_end = 0;
#endif
}
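
/*
 * With no generic delay loop calibration available, derive
 * loops_per_jiffy directly from the CPU clock rate.
 */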
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
	struct clk *clk = clk_get(NULL, "cpu_clk");

	if (IS_ERR(clk))
		panic("Need a sane CPU clock definition!");

	loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;

	printk(KERN_INFO "Calibrating delay loop (skipped)... "
			 "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
			 loops_per_jiffy/(500000/HZ),
			 (loops_per_jiffy/(5000/HZ)) % 100,
			 loops_per_jiffy);
}
#endif
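
/*
 * Register a node's memory range as a "System RAM" resource, slot the
 * kernel code/data/bss (and any crashkernel) resources underneath it,
 * make sure a PMB mapping covers it, and hand the range to memblock.
 */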
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
			       unsigned long end_pfn)
{
	struct resource *res = &mem_resources[nid];
	unsigned long start, end;

	WARN_ON(res->name); /* max one active range per node for now */

	start = start_pfn << PAGE_SHIFT;
	end = end_pfn << PAGE_SHIFT;

	res->name = "System RAM";
	res->start = start;
	res->end = end - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (request_resource(&iomem_resource, res)) {
		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
		       start_pfn, end_pfn);
		return;
	}

	/*
	 * We don't know which RAM region contains kernel data or
	 * the reserved crashkernel region, so try it repeatedly
	 * and let the resource manager test it.
	 */
	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
	request_resource(res, &bss_resource);
#ifdef CONFIG_KEXEC
	request_resource(res, &crashk_res);
#endif

	/*
	 * Also make sure that there is a PMB mapping that covers this
	 * range before we attempt to activate it, to avoid reset by MMU.
	 * We can hit this path with NUMA or memory hot-add.
	 */
	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);

	memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			  &memblock.memory, nid);
}

void __init __weak plat_early_device_setup(void)
{
}
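
/*
 * Locate the flattened device tree, either built into the kernel image
 * or passed in by the boot loader, and run the early scan over it.
 */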
#ifdef CONFIG_OF_EARLY_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
	static int done = 0;
	void *dt_virt;

	/* Avoid calling an __init function on secondary cpus. */
	if (done) return;

#ifdef CONFIG_USE_BUILTIN_DTB
	dt_virt = __dtb_start;
#else
	dt_virt = phys_to_virt(dt_phys);
#endif

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("Error: invalid device tree blob"
			" at physical address %p\n", (void *)dt_phys);
		while (true)
			cpu_relax();
	}

	done = 1;
}
#endif

void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	printk(KERN_NOTICE "Boot params:\n"
	       "... MOUNT_ROOT_RDONLY - %08lx\n"
	       "... RAMDISK_FLAGS     - %08lx\n"
	       "... ORIG_ROOT_DEV     - %08lx\n"
	       "... LOADER_TYPE       - %08lx\n"
	       "... INITRD_START      - %08lx\n"
	       "... INITRD_SIZE       - %08lx\n",
	       MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
	       ORIG_ROOT_DEV, LOADER_TYPE,
	       INITRD_START, INITRD_SIZE);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(__bss_start);
	bss_resource.end = virt_to_phys(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_OVERWRITE
	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#ifdef CONFIG_CMDLINE_EXTEND
	strlcat(command_line, " ", sizeof(command_line));
	strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
#endif
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	plat_early_device_setup();

	sh_mv_setup();

	/* Let earlyprintk output early console messages */
	sh_early_platform_driver_probe("earlyprintk", 1, 1);

#ifdef CONFIG_OF_EARLY_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
	unflatten_and_copy_device_tree();
#else
	unflatten_device_tree();
#endif
#endif

	paging_init();

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);

	plat_smp_setup();
}

/* processor boot mode configuration */
int generic_mode_pins(void)
{
	pr_warn("generic_mode_pins(): missing mode pin configuration\n");
	return 0;
}
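
/* Test whether the given mode pin is set, as read through the machine vector. */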
int test_mode_pin(int pin)
{
	return sh_mv.mv_mode_pins() & pin;
}

void __init arch_cpu_finalize_init(void)
{
	char *p = &init_utsname()->machine[2]; /* "sh" */

	select_idle_routine();

	current_cpu_data.loops_per_jiffy = loops_per_jiffy;

	switch (current_cpu_data.family) {
	case CPU_FAMILY_SH2:
		*p++ = '2';
		break;
	case CPU_FAMILY_SH2A:
		*p++ = '2';
		*p++ = 'a';
		break;
	case CPU_FAMILY_SH3:
		*p++ = '3';
		break;
	case CPU_FAMILY_SH4:
		*p++ = '4';
		break;
	case CPU_FAMILY_SH4A:
		*p++ = '4';
		*p++ = 'a';
		break;
	case CPU_FAMILY_SH4AL_DSP:
		*p++ = '4';
		*p++ = 'a';
		*p++ = 'l';
		*p++ = '-';
		*p++ = 'd';
		*p++ = 's';
		*p++ = 'p';
		break;
	case CPU_FAMILY_UNKNOWN:
		/*
		 * Specifically use CPU_FAMILY_UNKNOWN rather than
		 * default:, so we're able to have the compiler whine
		 * about unhandled enumerations.
		 */
		break;
	}

	pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));

#ifndef __LITTLE_ENDIAN__
	/* 'eb' means 'Endian Big' */
	*p++ = 'e';
	*p++ = 'b';
#endif
	*p = '\0';
}