setup.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV setup code.
 *
 * Copyright 2011 IBM Corp.
 */

#undef DEBUG

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/interrupt.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/tm.h>
#include <asm/setup.h>
#include <asm/security_features.h>

#include "powernv.h"
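
/*
 * Check whether the named node under ibm,opal/fw-features carries the
 * requested state property ("enabled" or "disabled").
 */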
static bool __init fw_feature_is(const char *state, const char *name,
                                 struct device_node *fw_features)
{
        struct device_node *np;
        bool rc = false;

        np = of_get_child_by_name(fw_features, name);
        if (np) {
                rc = of_property_read_bool(np, state);
                of_node_put(np);
        }

        return rc;
}
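
/*
 * Translate the firmware feature nodes advertised by OPAL into the
 * kernel's security feature flags.
 */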
static void __init init_fw_feat_flags(struct device_node *np)
{
        if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
                security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

        if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
                security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);

        if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
                security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);

        if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
                security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);

        if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
                security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);

        if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
                security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);

        if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
                security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);

        if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
                security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);

        /*
         * The features below are enabled by default, so we instead look to see
         * if firmware has *disabled* them, and clear them if so.
         */
        if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
                security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);

        if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
                security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

        if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
                security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

        if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
                security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np))
                security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);

        if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np))
                security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);

        if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np))
                security_ftr_clear(SEC_FTR_STF_BARRIER);
}
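
/*
 * Configure the L1D flush, entry/uaccess flush, count cache flush and STF
 * barrier mitigations based on the CPU type and the firmware-advertised
 * feature flags.
 */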
static void __init pnv_setup_security_mitigations(void)
{
        struct device_node *np, *fw_features;
        enum l1d_flush_type type;
        bool enable;

        /* Default to fallback in case fw-features are not available */
        type = L1D_FLUSH_FALLBACK;

        np = of_find_node_by_name(NULL, "ibm,opal");
        fw_features = of_get_child_by_name(np, "fw-features");
        of_node_put(np);

        if (fw_features) {
                init_fw_feat_flags(fw_features);
                of_node_put(fw_features);

                if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
                        type = L1D_FLUSH_MTTRIG;

                if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
                        type = L1D_FLUSH_ORI;
        }

        /*
         * The issues addressed by the entry and uaccess flush don't affect P7
         * or P8, so on bare metal disable them explicitly in case firmware does
         * not include the features to disable them. POWER9 and newer
         * processors should have the appropriate firmware flags.
         */
        if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p) ||
            pvr_version_is(PVR_POWER8E) || pvr_version_is(PVR_POWER8NVL) ||
            pvr_version_is(PVR_POWER8)) {
                security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
                security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
        }

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
                 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
                  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));

        setup_rfi_flush(type, enable);
        setup_count_cache_flush();

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
        setup_entry_flush(enable);

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
        setup_uaccess_flush(enable);

        setup_stf_barrier();
}
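
/*
 * Warn the user if firmware has guarded out (marked "bad") any CPU cores.
 */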
static void __init pnv_check_guarded_cores(void)
{
        struct device_node *dn;
        int bad_count = 0;

        for_each_node_by_type(dn, "cpu") {
                if (of_property_match_string(dn, "status", "bad") >= 0)
                        bad_count++;
        }

        if (bad_count) {
                printk("  _     _______________\n");
                pr_cont(" | |   /               \\\n");
                pr_cont(" | |   |    WARNING!   |\n");
                pr_cont(" | |   |               |\n");
                pr_cont(" | |   | It looks like |\n");
                pr_cont(" |_|   |  you have %*d |\n", 3, bad_count);
                pr_cont("  _    | guarded cores |\n");
                pr_cont(" (_)   \\_______________/\n");
        }
}

static void __init pnv_setup_arch(void)
{
        set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

        pnv_setup_security_mitigations();

        /* Initialize SMP */
        pnv_smp_init();

        /* Set up RTC and NVRAM callbacks */
        if (firmware_has_feature(FW_FEATURE_OPAL))
                opal_nvram_init();

        /* Enable NAP mode */
        powersave_nap = 1;

        pnv_check_guarded_cores();

        /* XXX PMCS */

        pnv_rng_init();
}
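
/*
 * Add the OPAL firmware version details to the hardware description
 * printed at boot.
 */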
static void __init pnv_add_hw_description(void)
{
        struct device_node *dn;
        const char *s;

        dn = of_find_node_by_path("/ibm,opal/firmware");
        if (!dn)
                return;

        if (of_property_read_string(dn, "version", &s) == 0 ||
            of_property_read_string(dn, "git-id", &s) == 0)
                seq_buf_printf(&ppc_hw_desc, "opal:%s ", s);

        if (of_property_read_string(dn, "mi-version", &s) == 0)
                seq_buf_printf(&ppc_hw_desc, "mi:%s ", s);

        of_node_put(dn);
}

static void __init pnv_init(void)
{
        pnv_add_hw_description();

        /*
         * Initialize the LPC bus now so that legacy serial
         * ports can be found on it
         */
        opal_lpc_init();

#ifdef CONFIG_HVC_OPAL
        if (firmware_has_feature(FW_FEATURE_OPAL))
                hvc_opal_init_early();
        else
#endif
                add_preferred_console("hvc", 0, NULL);

#ifdef CONFIG_PPC_64S_HASH_MMU
        if (!radix_enabled()) {
                size_t size = sizeof(struct slb_entry) * mmu_slb_size;
                int i;

                /* Allocate per cpu area to save old slb contents during MCE */
                for_each_possible_cpu(i) {
                        paca_ptrs[i]->mce_faulty_slbs =
                                        memblock_alloc_node(size,
                                        __alignof__(struct slb_entry),
                                        cpu_to_node(i));
                }
        }
#endif
}

static void __init pnv_init_IRQ(void)
{
        /* Try using a XIVE if available, otherwise use a XICS */
        if (!xive_native_init())
                xics_init();

        WARN_ON(!ppc_md.get_irq);
}

static void pnv_show_cpuinfo(struct seq_file *m)
{
        struct device_node *root;
        const char *model = "";

        root = of_find_node_by_path("/");
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: PowerNV %s\n", model);
        if (firmware_has_feature(FW_FEATURE_OPAL))
                seq_printf(m, "firmware\t: OPAL\n");
        else
                seq_printf(m, "firmware\t: BML\n");
        of_node_put(root);
        if (radix_enabled())
                seq_printf(m, "MMU\t\t: Radix\n");
        else
                seq_printf(m, "MMU\t\t: Hash\n");
}

static void pnv_prepare_going_down(void)
{
        /*
         * Disable all notifiers from OPAL, we can't
         * service interrupts anymore anyway
         */
        opal_event_shutdown();

        /* Print flash update message if one is scheduled. */
        opal_flash_update_print_message();

        smp_send_stop();

        hard_irq_disable();
}
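
/*
 * Reboot via OPAL, retrying while firmware reports it is busy. If a
 * specific reboot type is requested but fails, fall back to a plain
 * cec-reboot.
 */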
static void __noreturn pnv_restart(char *cmd)
{
        long rc;

        pnv_prepare_going_down();

        do {
                if (!cmd || !strlen(cmd))
                        rc = opal_cec_reboot();
                else if (strcmp(cmd, "full") == 0)
                        rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
                else if (strcmp(cmd, "mpipl") == 0)
                        rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL);
                else if (strcmp(cmd, "error") == 0)
                        rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL);
                else if (strcmp(cmd, "fast") == 0)
                        rc = opal_cec_reboot2(OPAL_REBOOT_FAST, NULL);
                else
                        rc = OPAL_UNSUPPORTED;

                if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                        /* OPAL is busy, wait for some time and retry */
                        opal_poll_events(NULL);
                        mdelay(10);
                } else if (cmd && rc) {
                        /* Unknown error while issuing reboot */
                        if (rc == OPAL_UNSUPPORTED)
                                pr_err("Unsupported '%s' reboot.\n", cmd);
                        else
                                pr_err("Unable to issue '%s' reboot. Err=%ld\n",
                                       cmd, rc);
                        pr_info("Forcing a cec-reboot\n");
                        cmd = NULL;
                        rc = OPAL_BUSY;
                } else if (rc != OPAL_SUCCESS) {
                        /* Unknown error while issuing cec-reboot */
                        pr_err("Unable to reboot. Err=%ld\n", rc);
                }

        } while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT);

        for (;;)
                opal_poll_events(NULL);
}

static void __noreturn pnv_power_off(void)
{
        long rc = OPAL_BUSY;

        pnv_prepare_going_down();

        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_cec_power_down(0);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
                else
                        mdelay(10);
        }
        for (;;)
                opal_poll_events(NULL);
}

static void __noreturn pnv_halt(void)
{
        pnv_power_off();
}

static void pnv_progress(char *s, unsigned short hex)
{
}

static void pnv_shutdown(void)
{
        /* Let the PCI code clear up IODA tables */
        pnv_pci_shutdown();

        /*
         * Stop OPAL activity: Unregister all OPAL interrupts so they
         * don't fire up while we kexec and make sure all potentially
         * DMA'ing ops are complete (such as dump retrieval).
         */
        opal_shutdown();
}

#ifdef CONFIG_KEXEC_CORE
static void pnv_kexec_wait_secondaries_down(void)
{
        int my_cpu, i, notified = -1;

        my_cpu = get_cpu();

        for_each_online_cpu(i) {
                uint8_t status;
                int64_t rc, timeout = 1000;

                if (i == my_cpu)
                        continue;

                for (;;) {
                        rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
                                                   &status);
                        if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
                                break;
                        barrier();
                        if (i != notified) {
                                printk(KERN_INFO "kexec: waiting for cpu %d "
                                       "(physical %d) to enter OPAL\n",
                                       i, paca_ptrs[i]->hw_cpu_id);
                                notified = i;
                        }

                        /*
                         * On crash secondaries might be unreachable or hung,
                         * so time out if we've waited too long.
                         */
                        mdelay(1);
                        if (timeout-- == 0) {
                                printk(KERN_ERR "kexec: timed out waiting for "
                                       "cpu %d (physical %d) to enter OPAL\n",
                                       i, paca_ptrs[i]->hw_cpu_id);
                                break;
                        }
                }
        }
}
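
/*
 * Tear down this CPU's interrupt controller state and, on OPAL systems,
 * return secondaries to firmware before kexec. The primary CPU waits for
 * the secondaries, then resets the endianness and MMU modes supported for
 * the next kernel.
 */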
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
        u64 reinit_flags;

        if (xive_enabled())
                xive_teardown_cpu();
        else
                xics_kexec_teardown_cpu(secondary);

        /* On OPAL, we return all CPUs to firmware */
        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        if (secondary) {
                /* Return secondary CPUs to firmware on OPAL v3 */
                mb();
                get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
                mb();

                /* Return the CPU to OPAL */
                opal_return_cpu();
        } else {
                /* Primary waits for the secondaries to have reached OPAL */
                pnv_kexec_wait_secondaries_down();

                /* Switch XIVE back to emulation mode */
                if (xive_enabled())
                        xive_shutdown();

                /*
                 * We might be running as little-endian - now that interrupts
                 * are disabled, reset the HILE bit to big-endian so we don't
                 * take interrupts in the wrong endian later
                 *
                 * We reinit to enable both radix and hash on P9 to ensure
                 * the mode used by the next kernel is always supported.
                 */
                reinit_flags = OPAL_REINIT_CPUS_HILE_BE;
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX |
                                OPAL_REINIT_CPUS_MMU_HASH;
                opal_reinit_cpus(reinit_flags);
        }
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long pnv_memory_block_size(void)
{
        /*
         * We map the kernel linear region with 1GB large pages on radix. For
         * memory hot unplug to work our memory block size must be at least
         * this size.
         */
        if (radix_enabled())
                return radix_mem_block_size;
        else
                return 256UL * 1024 * 1024;
}
#endif
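
/*
 * Wire up the OPAL-backed machdep callbacks (boot time, restart, power
 * off, machine check and HMI handling).
 */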
static void __init pnv_setup_machdep_opal(void)
{
        ppc_md.get_boot_time = opal_get_boot_time;
        ppc_md.restart = pnv_restart;
        pm_power_off = pnv_power_off;
        ppc_md.halt = pnv_halt;
        /* ppc_md.system_reset_exception gets filled in by pnv_smp_init() */
        ppc_md.machine_check_exception = opal_machine_check;
        ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
        if (opal_check_token(OPAL_HANDLE_HMI2))
                ppc_md.hmi_exception_early = opal_hmi_exception_early2;
        else
                ppc_md.hmi_exception_early = opal_hmi_exception_early;
        ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}

static int __init pnv_probe(void)
{
        if (!of_machine_is_compatible("ibm,powernv"))
                return 0;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                pnv_setup_machdep_opal();

        pr_debug("PowerNV detected !\n");

        pnv_init();

        return 1;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void __init pnv_tm_init(void)
{
        if (!firmware_has_feature(FW_FEATURE_OPAL) ||
            !pvr_version_is(PVR_POWER9) ||
            early_cpu_has_feature(CPU_FTR_TM))
                return;

        if (opal_reinit_cpus(OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED) != OPAL_SUCCESS)
                return;

        pr_info("Enabling TM (Transactional Memory) with Suspend Disabled\n");
        cur_cpu_spec->cpu_features |= CPU_FTR_TM;
        /* Make sure "normal" HTM is off (it should be) */
        cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_HTM;
        /* Turn on no suspend mode, and HTM no SC */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NO_SUSPEND | \
                                            PPC_FEATURE2_HTM_NOSC;
        tm_suspend_disabled = true;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Returns the cpu frequency for 'cpu' in Hz. This is used by
 * /proc/cpuinfo
 */
static unsigned long pnv_get_proc_freq(unsigned int cpu)
{
        unsigned long ret_freq;

        ret_freq = cpufreq_get(cpu) * 1000ul;

        /*
         * If the backend cpufreq driver does not exist,
         * then fall back to the old way of reporting the clockrate.
         */
        if (!ret_freq)
                ret_freq = ppc_proc_freq;

        return ret_freq;
}
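
/* Delegate early machine check handling to the CPU-specific handler, if any. */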
static long pnv_machine_check_early(struct pt_regs *regs)
{
        long handled = 0;

        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);

        return handled;
}

define_machine(powernv) {
        .name = "PowerNV",
        .probe = pnv_probe,
        .setup_arch = pnv_setup_arch,
        .init_IRQ = pnv_init_IRQ,
        .show_cpuinfo = pnv_show_cpuinfo,
        .get_proc_freq = pnv_get_proc_freq,
        .discover_phbs = pnv_pci_init,
        .progress = pnv_progress,
        .machine_shutdown = pnv_shutdown,
        .power_save = NULL,
        .calibrate_decr = generic_calibrate_decr,
        .machine_check_early = pnv_machine_check_early,
#ifdef CONFIG_KEXEC_CORE
        .kexec_cpu_down = pnv_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
        .memory_block_size = pnv_memory_block_size,
#endif
};