// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/seq_buf.h>

#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
#include <asm/prom.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif
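
/*
 * Per-core cache of "ibm,chip-id" values, indexed by cpu / threads_per_core
 * and populated lazily by cpu_to_chip_id() below.
 */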
int *chip_id_lookup_table;

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);
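
/* Example: booting with "mem=2G" caps usable RAM at a page-aligned 2 GiB. */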

/*
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_start)
		return 0;

	return	(start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
			start <= ALIGN(initrd_end, PAGE_SIZE);
#else
	return 0;
#endif
}

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = fdt_totalsize(initial_boot_params);

	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
	    !memblock_is_memory(start + size - 1) ||
	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
		p = memblock_alloc_raw(size, PAGE_SIZE);
		if (!p)
			panic("Failed to allocate %lu bytes to move device tree\n",
			      size);
		memcpy(p, initial_boot_params, size);
		initial_boot_params = p;
		DBG("Moved device tree to 0x%px\n", p);
	}

	DBG("<- move_device_tree\n");
}

/*
 * ibm,pa/pi-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  The first header
 * byte specifies the number of bytes following the header.
 * The second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
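
/*
 * Example (illustrative bytes only): a descriptor of
 *   { 0x05, 0x00, 0xc0, 0x80, 0x00, 0x00, 0x40 }
 * carries 5 attribute bytes after the 2-byte header (length 0x05,
 * type 0x00); byte 0 has bits 0 and 1 set (0x80 | 0x40), matching
 * the PPC_FEATURE_HAS_MMU and PPC_FEATURE_HAS_FPU entries below.
 */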
struct ibm_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa/pi-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
};

static struct ibm_feature ibm_pa_features[] __initdata = {
	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
	/*
	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
	 * we don't want to turn on TM here, so we use the *_COMP versions
	 * which are 0 if the kernel doesn't support TM.
	 */
	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },

	{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
};

/*
 * The ibm,pi-features property provides support for processor-specific
 * options not described in ibm,pa-features. Right now we use byte 0, bit 3
 * which indicates that a DSI interrupt occurs when a paste operation
 * targets a suspended NX window.
 */
static struct ibm_feature ibm_pi_features[] __initdata = {
	{ .pabyte = 0, .pabit = 3, .mmu_features  = MMU_FTR_NX_DSI },
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features |= fp->mmu_features;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
		}
	}
}

static void __init check_cpu_features(unsigned long node, char *name,
				      struct ibm_feature *fp,
				      unsigned long size)
{
	const unsigned char *pa_ftrs;
	int tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen, fp, size);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
	const __be32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

	if (slb_size_ptr)
		mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while (0)
#endif

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};
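
/*
 * Note: the matching in check_cpu_feature_properties() below is
 * ">= min_value", so a device tree with ibm,vmx = <2> satisfies both the
 * Altivec (>= 1) and VSX (>= 2) entries above; VSX implies Altivec here.
 */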

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static __init void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	const char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while (0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	int i;
	struct feature_property *fp = feature_properties;
	const __be32 *prop;

	for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && be32_to_cpup(prop) >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	const __be32 *intserv;
	int i, nthreads;
	int len;
	int found = -1;
	int found_thread = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		intserv = of_get_flat_dt_prop(node, "reg", &len);

	nthreads = len / sizeof(int);

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		if (be32_to_cpu(intserv[i]) ==
			fdt_boot_cpuid_phys(initial_boot_params)) {
			found = boot_cpu_count;
			found_thread = i;
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		boot_cpu_count++;
#endif
	}

	/* Not the boot CPU */
	if (found < 0)
		return 0;

	DBG("boot cpu: logical %d physical %d\n", found,
	    be32_to_cpu(intserv[found_thread]));
	boot_cpuid = found;

	if (IS_ENABLED(CONFIG_PPC64))
		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);

	/*
	 * PAPR defines "logical" PVR values for cpus that
	 * meet various levels of the architecture:
	 * 0x0f000001	Architecture version 2.04
	 * 0x0f000002	Architecture version 2.05
	 * If the cpu-version property in the cpu node contains
	 * such a value, we call identify_cpu again with the
	 * logical PVR value in order to use the cpu feature
	 * bits appropriate for the architecture level.
	 *
	 * A POWER6 partition in "POWER6 architected" mode
	 * uses the 0x0f000002 PVR value; in POWER5+ mode
	 * it uses 0x0f000001.
	 *
	 * If we're using device tree CPU feature discovery then we don't
	 * support the cpu-version property, and it's the responsibility of the
	 * firmware/hypervisor to provide the correct feature set for the
	 * architecture level via the ibm,powerpc-cpu-features binding.
	 */
	if (!dt_cpu_ftrs_in_use()) {
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
			identify_cpu(0, be32_to_cpup(prop));
			seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
		}

		check_cpu_feature_properties(node);
		check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
				   ARRAY_SIZE(ibm_pa_features));
		check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
				   ARRAY_SIZE(ibm_pi_features));
	}

	identical_pvr_fixup(node);
	init_mmu_slb_size(node);

#ifdef CONFIG_PPC64
	if (nthreads == 1)
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	else if (!dt_cpu_ftrs_in_use())
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}
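
/*
 * Scan the /chosen node for powerpc-specific properties (IOMMU overrides,
 * memory limit, TCE allocation window, crashkernel placement) once the
 * generic chosen-node handling has accepted the node.
 */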
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */

	/* Use common scan routine to determine if this is the chosen node */
	if (early_init_dt_scan_chosen(data) < 0)
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC_CORE
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}

/*
 * Compare the range against the max memory limit and update
 * the size if it crosses the limit.
 */
#ifdef CONFIG_SPARSEMEM
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);

	if (base >= max_mem)
		return false;
	if ((base + *size) > max_mem)
		*size = max_mem - base;
	return true;
}
#else
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	return true;
}
#endif
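
/*
 * Example (illustrative numbers): with MAX_PHYSMEM_BITS == 46 (64 TiB),
 * a block starting below that boundary but running past it is clamped,
 * while a block starting at or above it is rejected outright.
 */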

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm dynamic reconfiguration memory LMBs.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	u64 base, size;
	int is_kexec_kdump = 0, rngs;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	rngs = 1;

	/*
	 * Skip this block if the reserved bit is set in flags
	 * or if the block is not assigned to this partition.
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	if (is_kexec_kdump) {
		/*
		 * For each memblock in ibm,dynamic-memory, a
		 * corresponding entry in the linux,drconf-usable-memory
		 * property contains a counter 'p' followed by 'p'
		 * (base, size) pairs. Now read the counter from the
		 * linux,drconf-usable-memory property.
		 */
		rngs = dt_mem_next_cell(dt_root_size_cells, usm);
		if (!rngs) /* there are no (base, size) pairs */
			return 0;
	}
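
	/*
	 * Example (illustrative cells): a usable-memory entry of
	 * <2  base0 size0  base1 size1> yields rngs == 2, and the loop
	 * below consumes one (base, size) pair per iteration.
	 */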
	do {
		if (is_kexec_kdump) {
			base = dt_mem_next_cell(dt_root_addr_cells, usm);
			size = dt_mem_next_cell(dt_root_size_cells, usm);
		}

		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}

		if (!validate_mem_limit(base, &size))
			continue;

		DBG("Adding: %llx -> %llx\n", base, size);
		memblock_add(base, size);

		if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
			memblock_mark_hotplug(base, size);
	} while (--rngs);

	return 0;
}
#endif /* CONFIG_PPC_PSERIES */
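
/*
 * Scan memory nodes: on pseries, handle the dynamic-reconfiguration LMB
 * list first when present, then fall through to the generic
 * flat-device-tree memory scan.
 */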
static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");

	if (node > 0)
		walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
#endif

	return early_init_dt_scan_memory();
}

/*
 * For a relocatable kernel, we need to get the memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). In this case,
 * we just want to get the memstart_addr and would not like to touch the
 * memblock at this stage. So introduce a variable to skip the
 * memblock_add() for this reason.
 */
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif
	/*
	 * Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree as it represents
	 * the RMA on ppc64 server.
	 */
	if (base < memstart_addr) {
		memstart_addr = base;
		first_memblock_size = size;
	}

	/* Add the chunk to the MEMBLOCK list */
	if (add_mem_to_memblock) {
		if (validate_mem_limit(base, &size))
			memblock_add(base, size);
	}
}

static void __init early_reserve_mem_dt(void)
{
	unsigned long i, dt_root;
	int len;
	const __be32 *prop;

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	dt_root = of_get_flat_dt_root();

	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

	if (!prop)
		return;

	DBG("Found new-style reserved-ranges\n");

	/*
	 * Each reserved range is an (address, size) pair, 2 cells each,
	 * totalling 4 cells per range.
	 */
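	/*
	 * Example: reserved-ranges = <0x0 0x30000000  0x0 0x10000000>
	 * describes one range reserving 256 MiB starting at 768 MiB.
	 */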
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			DBG("reserving: %llx -> %llx\n", base, size);
			memblock_reserve(base, size);
		}
	}
}

static void __init early_reserve_mem(void)
{
	__be64 *reserve_map;

	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
			fdt_off_mem_rsvmap(initial_boot_params));

	/* Look for the new "reserved-regions" property in the DT */
	early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start)) {
		memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
			ALIGN(initrd_end, PAGE_SIZE) -
			ALIGN_DOWN(initrd_start, PAGE_SIZE));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	if (!IS_ENABLED(CONFIG_PPC32))
		return;

	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values.
	 */
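	/*
	 * Detection heuristic: on ppc32 no reserved region can start above
	 * 4 GiB, so if the first 64-bit field exceeds 0xffffffff it must
	 * really be a (base, size) pair of 32-bit values.
	 */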
	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
		u32 base_32, size_32;
		__be32 *reserve_map_32 = (__be32 *)reserve_map;

		DBG("Found old 32-bit reserve map\n");

		while (1) {
			base_32 = be32_to_cpup(reserve_map_32++);
			size_32 = be32_to_cpup(reserve_map_32++);
			if (size_32 == 0)
				break;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			memblock_reserve(base_32, size_32);
		}
		return;
	}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static bool tm_disabled __initdata;

static int __init parse_ppc_tm(char *str)
{
	bool res;

	if (kstrtobool(str, &res))
		return -EINVAL;

	tm_disabled = !res;

	return 0;
}

early_param("ppc_tm", parse_ppc_tm);
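
/* Example: booting with "ppc_tm=off" sets tm_disabled, so tm_init() below strips the HTM feature bits. */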

static void __init tm_init(void)
{
	if (tm_disabled) {
		pr_info("Disabling hardware transactional memory (HTM)\n");
		cur_cpu_spec->cpu_user_features2 &=
			~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
		cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
		return;
	}

	pnv_tm_init();
}
#else
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static int __init
early_init_dt_scan_model(unsigned long node, const char *uname,
			 int depth, void *data)
{
	const char *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "model", NULL);
	if (prop)
		seq_buf_printf(&ppc_hw_desc, "%s ", prop);

	/* break now */
	return 1;
}

#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
	/*
	 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
	 * have configured via the device tree features or via __init_FSCR().
	 * That value will then be propagated to pid 1 (init) and all future
	 * processes.
	 */
	if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {}
#endif

void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%px)\n", params);

	/* Too early to BUG_ON(), do it by hand */
	if (!early_init_dt_verify(params))
		panic("BUG: Failed verifying flat device tree, bad version?");

	of_scan_flat_dt(early_init_dt_scan_model, NULL);

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PPC_POWERNV
	/* Some machines might need OPAL info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_opal, NULL);

	/* Scan tree for ultravisor feature */
	of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
#endif

#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/* scan tree to see if dump is active during last boot */
	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif

	/*
	 * Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();

	/*
	 * As generic code authors expect to be able to use static keys
	 * in early_param() handlers, we initialize the static keys just
	 * before parsing early params (it's fine to call jump_label_init()
	 * more than once).
	 */
	jump_label_init();
	parse_early_param();

	/* make sure we've parsed cmdline for mem= before this */
	if (memory_limit)
		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
	setup_initial_memory_limit(memstart_addr, first_memblock_size);
	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
	memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		memblock_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/*
	 * If we fail to reserve memory for firmware-assisted dump then
	 * fall back to kexec based kdump.
	 */
	if (fadump_reserve_mem() == 0)
#endif
		reserve_crashkernel();
	early_reserve_mem();

	/* Ensure that total memory size is page-aligned. */
	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
	memblock_enforce_memory_limit(limit);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
	if (!early_radix_enabled())
		memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
#endif

	memblock_allow_resize();
	memblock_dump_all();

	DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());

	/*
	 * We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too?
	 */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	dt_cpu_ftrs_scan();

	// We can now add the CPU name & PVR to the hardware description
	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));

	/*
	 * Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
	if (boot_cpuid < 0) {
		printk("Failed to identify boot CPU !\n");
		BUG();
	}

	save_fscr_to_task();

#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
	/*
	 * We'll later wait for secondaries to check in; there are
	 * NCPUS-1 non-boot CPUs  :-)
	 */
	spinning_secondaries = boot_cpu_count - 1;
#endif

	mmu_early_init_devtree();

#ifdef CONFIG_PPC_POWERNV
	/* Scan and build the list of machine check recoverable ranges */
	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
	epapr_paravirt_early_init();

	/* Now try to figure out if we are running on LPAR and so on */
	pseries_probe_fw_features();

	/*
	 * Initialize pkey features and default AMR/IAMR values
	 */
	pkey_early_init_devtree();

#ifdef CONFIG_PPC_PS3
	/* Identify PS3 firmware */
	if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
		powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
#endif

	tm_init();

	DBG(" <- early_init_devtree()\n");
}

#ifdef CONFIG_RELOCATABLE
/*
 * This function runs before early_init_devtree, so we have to init
 * initial_boot_params ourselves.
 */
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/*
	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
	 * disturbing the memblock.
	 */
	add_mem_to_memblock = 0;
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();
	add_mem_to_memblock = 1;

	if (size)
		*size = first_memblock_size;
}
#endif

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 * @np: device node of the device
 *
 * This looks for a property "ibm,chip-id" in the node or any
 * of its parents and returns its content, or -1 if it cannot
 * be found.
 */
int of_get_ibm_chip_id(struct device_node *np)
{
	of_node_get(np);
	while (np) {
		u32 chip_id;

		/*
		 * Skiboot may produce memory nodes that contain more than one
		 * cell in chip-id, we only read the first one here.
		 */
		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
			of_node_put(np);
			return chip_id;
		}

		np = of_get_next_parent(np);
	}
	return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);

/**
 * cpu_to_chip_id - Return the cpu's chip-id
 * @cpu: The logical cpu number.
 *
 * Return the value of the ibm,chip-id property corresponding to the given
 * logical cpu number. If the chip-id cannot be found, returns -1.
 */
int cpu_to_chip_id(int cpu)
{
	struct device_node *np;
	int ret = -1, idx;

	idx = cpu / threads_per_core;
	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
		return chip_id_lookup_table[idx];

	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		ret = of_get_ibm_chip_id(np);
		of_node_put(np);

		if (chip_id_lookup_table)
			chip_id_lookup_table[idx] = ret;
	}

	return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
#ifdef CONFIG_SMP
	/*
	 * Early firmware scanning must use this rather than
	 * get_hard_smp_processor_id because we don't have pacas allocated
	 * until memory topology is discovered.
	 */
	if (cpu_to_phys_id != NULL)
		return (int)phys_id == cpu_to_phys_id[cpu];
#endif

	return (int)phys_id == get_hard_smp_processor_id(cpu);
}