dt_cpu_ftrs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>

/* Device-tree visible constants follow */
#define ISA_V3_0B	3000
#define ISA_V3_1	3100

#define USABLE_PR	(1U << 0)
#define USABLE_OS	(1U << 1)
#define USABLE_HV	(1U << 2)

#define HV_SUPPORT_HFSCR	(1U << 0)
#define OS_SUPPORT_FSCR		(1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU

struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;
	uint32_t hv_support;
	uint32_t os_support;
	uint32_t hfscr_bit_nr;
	uint32_t fscr_bit_nr;
	uint32_t hwcap_bit_nr;
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)

/*
 * Set up the base CPU
 */

static int hv_mode;

static struct {
	u64	lpcr;
	u64	hfscr;
	u64	fscr;
	u64	pcr;
} system_registers;

static void (*init_pmu_registers)(void);
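/*
 * Restore the SPR state captured in system_registers at the end of feature
 * setup. This is installed as cur_cpu_spec->cpu_restore (see base_cpu_spec
 * below), so secondary and re-onlined CPUs come up with the same
 * LPCR/HFSCR/FSCR/PCR configuration as the boot CPU.
 */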
static void __restore_cpu_cpufeatures(void)
{
	mtspr(SPRN_LPCR, system_registers.lpcr);

	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
		mtspr(SPRN_PCR, system_registers.pcr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_DT_CPU_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init. */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.machine_check_early	= NULL,
	.platform		= NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR. */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);
	mtspr(SPRN_PCR, PCR_MASK);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
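/*
 * Generic enable path for features not listed in dt_cpu_feature_match_table:
 * follow the recipe advertised by the device tree (set the given HFSCR/FSCR
 * bits, then advertise the hwcap bit to userspace). Only used when
 * enable_unknown is true, i.e. not booted with dt_cpu_ftrs=known.
 */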
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_AMOR, ~0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISA 207 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISAv3.0B */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
	if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
		return 0;

	cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * Linux relies on FSCR[DSCR] being clear, so that we can take the
	 * facility unavailable interrupt and track the task's usage of DSCR.
	 * See facility_unavailable_exception().
	 * Clear the bit here so that feat_enable() doesn't set it.
	 */
	f->fscr_bit_nr = -1;
	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |= (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
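/*
 * HFSCR bit 60 (IBM bit numbering, hence PPC_BIT) is the performance
 * monitor facility enable; setting it lets the OS/guest access the PMU
 * SPRs initialised by the init_pmu_power* helpers below.
 */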
static void __init hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}

static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}

static void init_pmu_power10(void)
{
	init_pmu_power9();

	mtspr(SPRN_MMCR3, 0);
	mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}

static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power10();
	init_pmu_registers = init_pmu_power10;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}

static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power10";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;

	return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}

static int __init feat_enable_mma(struct dt_cpu_feature *f)
{
	u64 pcr;

	feat_enable(f);
	pcr = mfspr(SPRN_PCR);
	pcr &= ~PCR_MMA_DIS;
	mtspr(SPRN_PCR, pcr);

	return 1;
}

struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};
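/*
 * Map device tree feature node names to enable callbacks. When a callback
 * succeeds, cpu_ftr_bit_mask is ORed into cur_cpu_spec->cpu_features (see
 * cpufeatures_process_feature()). Names not listed here fall back to the
 * generic recipe in feat_try_enable_unknown().
 */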
static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"system-call-vectored", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	/* alignment-interrupt-dsisr ignored */
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"machine-check-power10", feat_enable_mce_power10, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"performance-monitor-power10", feat_enable_pmu_power10, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
	{"prefix-instructions", feat_enable, 0},
	{"matrix-multiply-assist", feat_enable_mma, 0},
	{"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1},
};

static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;
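/*
 * Command line control: "dt_cpu_ftrs=off" disables device tree CPU feature
 * discovery entirely, "dt_cpu_ftrs=known" enables only features listed in
 * the match table above and skips feat_try_enable_unknown().
 */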
static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= ISA_V3_0B) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}

	if (isa >= ISA_V3_1) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
	}
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f)) {
				cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
				break;
			}

			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
		pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
			f->name);
		return false;
	}

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}

/*
 * Handle POWER9 broadcast tlbie invalidation issue using
 * cpu feature flag.
 */
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
	if (PVR_VER(pvr) == PVR_POWER9) {
		/*
		 * Set the tlbie feature flag for anything below
		 * Nimbus DD 2.3 and Cumulus DD 1.3
		 */
		if ((pvr & 0xe000) == 0) {
			/* Nimbus */
			if ((pvr & 0xfff) < 0x203)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else if ((pvr & 0xc000) == 0) {
			/* Cumulus */
			if ((pvr & 0xfff) < 0x103)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else {
			WARN_ONCE(1, "Unknown PVR");
			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		}

		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
	}
}
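/*
 * PVR version 0x004e is POWER9; the low 16 bits of the PVR encode the chip
 * variant and DD (design level) revision, which is what the masked
 * comparisons below key off.
 */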
static __init void cpufeatures_cpu_quirks(void)
{
	unsigned long version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffefff) == 0x004e0200) {
		/* DD2.0 has no feature flag */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0201) {
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0202) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0203) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else if ((version & 0xffff0000) == 0x004e0000) {
		/* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	}

	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
	}

	update_tlbie_feature_flag(version);
}

static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);
	system_registers.pcr = mfspr(SPRN_PCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

static int __init disabled_on_cmdline(void)
{
	unsigned long root, chosen;
	const char *p;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return false;

	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
	if (!p)
		return false;

	if (strstr(p, "dt_cpu_ftrs=off"))
		return true;

	return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
{
	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
	    && of_get_flat_dt_prop(node, "isa", NULL))
		return 1;

	return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
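/*
 * Each feature is a subnode of the ibm,powerpc-cpu-features node. A purely
 * illustrative sketch of the properties parsed below (node name and values
 * are not taken from real firmware):
 *
 *	example-feature {
 *		isa = <3000>;			// ISA_V3_0B
 *		usable-privilege = <0x7>;	// USABLE_PR | USABLE_OS | USABLE_HV
 *		hv-support = <0x1>;		// HV_SUPPORT_HFSCR
 *		os-support = <0x1>;		// OS_SUPPORT_FSCR
 *		hfscr-bit-nr = <...>;
 *		fscr-bit-nr = <...>;
 *		hwcap-bit-nr = <...>;
 *		dependencies = <...>;		// phandles of prerequisite features
 *	};
 */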
static int __init process_cpufeatures_node(unsigned long node,
					   const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];

	f->node = node;
	f->name = uname;

	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}
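/*
 * Second pass: enable features that carry a "dependencies" property. Each
 * entry is the phandle of another feature node; recurse into those first
 * and disable this feature if any dependency ended up disabled.
 */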
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property", f->name);
		return;
	}

	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
					    const char *uname,
					    void *data)
{
	int *count = data;

	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
					     const char *uname,
					     void *data)
{
	int *count = data;

	(*count)++;

	return 0;
}

static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);

	dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
	if (!dt_cpu_features)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__,
		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
		      PAGE_SIZE);

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strscpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(dt_cpu_features,
			sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);

	return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}